go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-api\-machinery\]\sServers\swith\ssupport\sfor\sAPI\schunking\sshould\ssupport\scontinue\slisting\sfrom\sthe\slast\skey\sif\sthe\soriginal\sversion\shas\sbeen\scompacted\saway\,\sthough\sthe\slist\sis\sinconsistent\s\[Slow\]$'
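For context, the focused test exercises API chunking: a LIST is issued with a small `limit`, and the returned `continue` token is replayed until the resource version behind the token has been compacted out of etcd, at which point the apiserver answers 410 Gone (`ResourceExpired`) along with a fresh token that lets the client finish the list, inconsistently, from the last key. Below is a minimal client-go sketch of that flow; the kubeconfig path is taken from the log further down, while the namespace and the choice of Pods as the listed resource are placeholders rather than details of the actual test (which lists PodTemplates it created itself):

```go
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: same kubeconfig the test framework logs below.
	config, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	opts := metav1.ListOptions{Limit: 25} // request the list one chunk at a time
	for {
		list, err := client.CoreV1().Pods("default").List(context.TODO(), opts)
		if apierrors.IsResourceExpired(err) {
			// The continue token outlived etcd compaction. The 410 response
			// carries a replacement token that resumes, inconsistently, from
			// the last key already returned.
			if status, ok := err.(apierrors.APIStatus); ok && status.Status().ListMeta.Continue != "" {
				opts.Continue = status.Status().ListMeta.Continue
				continue
			}
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("received chunk of %d pods\n", len(list.Items))
		if list.Continue == "" {
			break // final chunk
		}
		opts.Continue = list.Continue
	}
}
```

In the failing run below, this logic was never reached: the suite died in setup before any chunked LIST was attempted.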
test/e2e/framework/framework.go:241
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000f5c780)
	test/e2e/framework/framework.go:241 +0x96f

from junit_01.xml
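The trace points at the framework's BeforeEach, which creates the test namespace and then blocks until that namespace's "default" ServiceAccount exists before any test code runs; the timeout below shows that wait never succeeding, plausibly because kube-controller-manager, whose controller populates the default ServiceAccount, was repeatedly restarting per the node dump further down. A rough sketch of such a wait with client-go; the helper name, interval, and timeout are illustrative, not the framework's actual code:

```go
package sketch

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForDefaultServiceAccount polls until the "default" ServiceAccount shows
// up in the namespace, which is the condition this run timed out on. This is
// a hypothetical helper; the framework's real code lives in
// test/e2e/framework/framework.go.
func waitForDefaultServiceAccount(ctx context.Context, client kubernetes.Interface, ns string) error {
	return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		_, err := client.CoreV1().ServiceAccounts(ns).Get(ctx, "default", metav1.GetOptions{})
		if err == nil {
			return true, nil // service account exists; the namespace is usable
		}
		if apierrors.IsNotFound(err) {
			return false, nil // not created yet; keep polling
		}
		// Transient API errors (such as the "connection refused" seen below
		// while the apiserver was unreachable) are treated as retryable, not
		// fatal, so the wait keeps going until the timeout expires.
		return false, nil
	})
}
```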
[BeforeEach] [sig-api-machinery] Servers with support for API chunking
  set up framework | framework.go:178
STEP: Creating a kubernetes client 11/26/22 08:40:28.097
Nov 26 08:40:28.097: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename chunking 11/26/22 08:40:28.099
Nov 26 08:40:28.138: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused
Nov 26 08:42:34.218: INFO: Unexpected error:
<*fmt.wrapError | 0xc0011bc7c0>: {
    msg: "wait for service account \"default\" in namespace \"chunking-7822\": timed out waiting for the condition",
    err: <*errors.errorString | 0xc00017da10>{
        s: "timed out waiting for the condition",
    },
}
Nov 26 08:42:34.218: FAIL: wait for service account "default" in namespace "chunking-7822": timed out waiting for the condition

Full Stack Trace
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000f5c780)
	test/e2e/framework/framework.go:241 +0x96f

[AfterEach] [sig-api-machinery] Servers with support for API chunking
  test/e2e/framework/node/init/init.go:32
Nov 26 08:42:34.218: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[DeferCleanup (Each)] [sig-api-machinery] Servers with support for API chunking
  dump namespaces | framework.go:196
STEP: dump namespace information after failure 11/26/22 08:42:34.301
STEP: Collecting events from namespace "chunking-7822". 11/26/22 08:42:34.301
STEP: Found 0 events. 11/26/22 08:42:34.343
Nov 26 08:42:34.387: INFO: POD  NODE  PHASE  GRACE  CONDITIONS
Nov 26 08:42:34.387: INFO:
Nov 26 08:42:34.437: INFO: Logging node info for node bootstrap-e2e-master
Nov 26 08:42:34.509: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master ba8d154d-f7d1-4d02-b950-a084eb625244 7245 0 2022-11-26 08:28:16 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 08:38:48 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:38:48 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:38:48 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:38:48 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:38:48 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.83.96.51,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bf41ff823483c389ad7dfd0c1ce16b06,SystemUUID:bf41ff82-3483-c389-ad7d-fd0c1ce16b06,BootID:5db3fc62-e7bb-4715-a04e-2bdf5328dbc8,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:42:34.509: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 08:42:34.721: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 08:42:34.994: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-26 08:27:49 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:34.995: INFO: Container kube-addon-manager ready: true, restart count 3 Nov 26 08:42:34.995: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-26 08:27:49 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:34.995: INFO: Container l7-lb-controller ready: false, restart count 5 Nov 26 08:42:34.995: INFO: metadata-proxy-v0.1-xx7th started at 2022-11-26 08:28:23 +0000 UTC (0+2 container statuses recorded) Nov 26 08:42:34.995: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 08:42:34.995: INFO: Container 
prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 08:42:34.995: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:34.995: INFO: Container etcd-container ready: true, restart count 2 Nov 26 08:42:34.995: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:34.995: INFO: Container etcd-container ready: true, restart count 1 Nov 26 08:42:34.995: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:34.995: INFO: Container konnectivity-server-container ready: true, restart count 2 Nov 26 08:42:34.995: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:34.995: INFO: Container kube-controller-manager ready: false, restart count 5 Nov 26 08:42:34.995: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:34.995: INFO: Container kube-scheduler ready: true, restart count 4 Nov 26 08:42:34.995: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:34.995: INFO: Container kube-apiserver ready: true, restart count 2 Nov 26 08:42:37.122: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 08:42:37.122: INFO: Logging node info for node bootstrap-e2e-minion-group-327c Nov 26 08:42:37.169: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-327c 2792e2db-d60b-4a1a-b593-202ac7a81c7e 8047 0 2022-11-26 08:28:14 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-327c kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-327c topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-2303":"bootstrap-e2e-minion-group-327c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {node-problem-detector Update v1 2022-11-26 08:38:21 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-26 08:38:38 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-26 08:42:32 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-327c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:38:21 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:38:21 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:38:21 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning 
properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:38:21 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:38:21 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:38:21 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:38:21 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:24 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:37:55 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:37:55 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:37:55 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:37:55 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.233.8,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:d5273c9e67ce602a784d93fca00549e5,SystemUUID:d5273c9e-67ce-602a-784d-93fca00549e5,BootID:6b605594-03f1-4a39-9bc1-bb9fc688da43,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692,DevicePath:,},},Config:nil,},} Nov 26 08:42:37.169: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-327c Nov 26 08:42:37.385: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-327c Nov 26 08:42:37.999: INFO: csi-mockplugin-0 started at 2022-11-26 08:34:51 +0000 UTC (0+4 container statuses recorded) Nov 26 08:42:37.999: INFO: Container busybox ready: false, restart count 4 Nov 26 08:42:37.999: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 08:42:37.999: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 08:42:37.999: INFO: Container mock ready: false, restart count 5 Nov 26 08:42:37.999: INFO: csi-mockplugin-0 started at 2022-11-26 08:31:07 +0000 UTC (0+4 container statuses recorded) Nov 26 08:42:37.999: INFO: Container busybox ready: true, restart count 6 Nov 26 08:42:37.999: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 08:42:37.999: INFO: Container driver-registrar ready: false, restart count 
5 Nov 26 08:42:37.999: INFO: Container mock ready: false, restart count 5 Nov 26 08:42:37.999: INFO: hostexec-bootstrap-e2e-minion-group-327c-6xj2s started at 2022-11-26 08:39:23 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 08:42:37.999: INFO: csi-mockplugin-0 started at 2022-11-26 08:30:43 +0000 UTC (0+3 container statuses recorded) Nov 26 08:42:37.999: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 08:42:37.999: INFO: Container driver-registrar ready: false, restart count 6 Nov 26 08:42:37.999: INFO: Container mock ready: false, restart count 6 Nov 26 08:42:37.999: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 08:30:43 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 08:42:37.999: INFO: csi-mockplugin-0 started at 2022-11-26 08:30:43 +0000 UTC (0+3 container statuses recorded) Nov 26 08:42:37.999: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 08:42:37.999: INFO: Container driver-registrar ready: false, restart count 6 Nov 26 08:42:37.999: INFO: Container mock ready: false, restart count 6 Nov 26 08:42:37.999: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:31:06 +0000 UTC (0+7 container statuses recorded) Nov 26 08:42:37.999: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 08:42:37.999: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 08:42:37.999: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 08:42:37.999: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 08:42:37.999: INFO: Container hostpath ready: false, restart count 5 Nov 26 08:42:37.999: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 08:42:37.999: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 08:42:37.999: INFO: l7-default-backend-8549d69d99-b5jrs started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 08:42:37.999: INFO: konnectivity-agent-mmmgd started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container konnectivity-agent ready: false, restart count 6 Nov 26 08:42:37.999: INFO: csi-mockplugin-0 started at 2022-11-26 08:37:21 +0000 UTC (0+4 container statuses recorded) Nov 26 08:42:37.999: INFO: Container busybox ready: true, restart count 1 Nov 26 08:42:37.999: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 08:42:37.999: INFO: Container driver-registrar ready: true, restart count 1 Nov 26 08:42:37.999: INFO: Container mock ready: true, restart count 1 Nov 26 08:42:37.999: INFO: metadata-proxy-v0.1-w74pw started at 2022-11-26 08:28:15 +0000 UTC (0+2 container statuses recorded) Nov 26 08:42:37.999: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 08:42:37.999: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 08:42:37.999: INFO: netserver-0 started at 2022-11-26 08:32:54 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container webserver ready: true, restart count 5 Nov 26 08:42:37.999: INFO: pvc-volume-tester-6957f started at 2022-11-26 08:33:17 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container volume-tester ready: false, restart count 0 Nov 26 08:42:37.999: INFO: csi-hostpathplugin-0 started at 
2022-11-26 08:37:11 +0000 UTC (0+7 container statuses recorded) Nov 26 08:42:37.999: INFO: Container csi-attacher ready: true, restart count 2 Nov 26 08:42:37.999: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 08:42:37.999: INFO: Container csi-resizer ready: true, restart count 2 Nov 26 08:42:37.999: INFO: Container csi-snapshotter ready: true, restart count 2 Nov 26 08:42:37.999: INFO: Container hostpath ready: true, restart count 2 Nov 26 08:42:37.999: INFO: Container liveness-probe ready: true, restart count 2 Nov 26 08:42:37.999: INFO: Container node-driver-registrar ready: true, restart count 2 Nov 26 08:42:37.999: INFO: kube-dns-autoscaler-5f6455f985-tnj96 started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container autoscaler ready: false, restart count 6 Nov 26 08:42:37.999: INFO: coredns-6d97d5ddb-cz84m started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container coredns ready: false, restart count 7 Nov 26 08:42:37.999: INFO: coredns-6d97d5ddb-q6tzt started at 2022-11-26 08:28:28 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container coredns ready: true, restart count 7 Nov 26 08:42:37.999: INFO: pod-43d27f70-d941-4117-b96d-c563fc43297f started at 2022-11-26 08:39:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container write-pod ready: false, restart count 0 Nov 26 08:42:37.999: INFO: kube-proxy-bootstrap-e2e-minion-group-327c started at 2022-11-26 08:28:14 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container kube-proxy ready: false, restart count 6 Nov 26 08:42:37.999: INFO: pod-d4f0fe4d-227a-40e8-929b-a033a1faef35 started at 2022-11-26 08:39:35 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container write-pod ready: false, restart count 0 Nov 26 08:42:37.999: INFO: volume-snapshot-controller-0 started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:37.999: INFO: Container volume-snapshot-controller ready: true, restart count 6 Nov 26 08:42:38.860: INFO: Latency metrics for node bootstrap-e2e-minion-group-327c Nov 26 08:42:38.860: INFO: Logging node info for node bootstrap-e2e-minion-group-lz41 Nov 26 08:42:38.902: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-lz41 2e433af5-3311-4285-97fa-cde6a9a5b261 7703 0 2022-11-26 08:28:20 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-lz41 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-lz41 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-27":"bootstrap-e2e-minion-group-lz41"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:20 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:32:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 08:38:27 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:39:25 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-lz41,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 
DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:38:27 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:38:27 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:38:27 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:38:27 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:38:27 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:38:27 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:38:27 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:39:25 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:39:25 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:39:25 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:39:25 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.83.179.153,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:aa52222b7f092a36e5e364be7d47d224,SystemUUID:aa52222b-7f09-2a36-e5e3-64be7d47d224,BootID:74a831b5-c273-4958-8b03-2d43808117f5,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9 kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},},Config:nil,},} Nov 26 08:42:38.903: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-lz41 Nov 26 08:42:38.946: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-lz41 Nov 26 08:42:39.202: INFO: nfs-server started at 2022-11-26 08:38:49 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.202: INFO: Container nfs-server ready: true, restart count 3 Nov 26 08:42:39.202: INFO: pod-6d904d9d-47d9-4612-b9ba-75195733a8e3 started at 2022-11-26 08:32:05 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.202: INFO: Container write-pod ready: false, restart count 0 Nov 26 08:42:39.202: INFO: konnectivity-agent-8v4r5 started at 2022-11-26 08:28:34 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.202: INFO: Container konnectivity-agent ready: true, restart count 6 Nov 26 08:42:39.202: INFO: netserver-1 started at 2022-11-26 08:32:54 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.202: INFO: Container webserver ready: false, restart count 6 Nov 26 08:42:39.202: INFO: pod-secrets-237c2934-d670-464d-9497-3fea99e7afae started at 2022-11-26 08:35:10 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.202: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 08:42:39.202: INFO: pod-back-off-image started at 2022-11-26 08:36:35 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.202: INFO: Container back-off 
ready: false, restart count 5 Nov 26 08:42:39.202: INFO: metadata-proxy-v0.1-gtpkq started at 2022-11-26 08:28:22 +0000 UTC (0+2 container statuses recorded) Nov 26 08:42:39.202: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 08:42:39.202: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 08:42:39.202: INFO: metrics-server-v0.5.2-867b8754b9-q5chn started at 2022-11-26 08:28:48 +0000 UTC (0+2 container statuses recorded) Nov 26 08:42:39.202: INFO: Container metrics-server ready: false, restart count 6 Nov 26 08:42:39.202: INFO: Container metrics-server-nanny ready: false, restart count 6 Nov 26 08:42:39.202: INFO: back-off-cap started at 2022-11-26 08:33:28 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.202: INFO: Container back-off-cap ready: false, restart count 6 Nov 26 08:42:39.202: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:31:57 +0000 UTC (0+7 container statuses recorded) Nov 26 08:42:39.202: INFO: Container csi-attacher ready: true, restart count 5 Nov 26 08:42:39.202: INFO: Container csi-provisioner ready: true, restart count 5 Nov 26 08:42:39.202: INFO: Container csi-resizer ready: true, restart count 5 Nov 26 08:42:39.202: INFO: Container csi-snapshotter ready: true, restart count 5 Nov 26 08:42:39.202: INFO: Container hostpath ready: true, restart count 5 Nov 26 08:42:39.202: INFO: Container liveness-probe ready: true, restart count 5 Nov 26 08:42:39.202: INFO: Container node-driver-registrar ready: true, restart count 5 Nov 26 08:42:39.202: INFO: kube-proxy-bootstrap-e2e-minion-group-lz41 started at 2022-11-26 08:28:20 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.202: INFO: Container kube-proxy ready: false, restart count 6 Nov 26 08:42:39.651: INFO: Latency metrics for node bootstrap-e2e-minion-group-lz41 Nov 26 08:42:39.651: INFO: Logging node info for node bootstrap-e2e-minion-group-s7dx Nov 26 08:42:39.694: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-s7dx 94f7d8ed-1dd6-4a3f-9454-b3d54cd8c750 7677 0 2022-11-26 08:28:22 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-s7dx kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-s7dx topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:23 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:33:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 08:38:29 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:39:16 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-s7dx,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:38:29 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:38:29 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning 
properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:38:29 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:38:29 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:38:29 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:38:29 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:38:29 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:39:16 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:39:16 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:39:16 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:39:16 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.127.51.136,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7318864e323f36191a9cc6aee4e5582d,SystemUUID:7318864e-323f-3619-1a9c-c6aee4e5582d,BootID:247dfbca-e301-45ca-b5e7-bc2da79a6926,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:42:39.695: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-s7dx Nov 26 08:42:39.745: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-s7dx Nov 26 08:42:39.813: INFO: metadata-proxy-v0.1-m5q9x started at 2022-11-26 08:28:23 +0000 UTC (0+2 container statuses recorded) Nov 26 08:42:39.813: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 08:42:39.813: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 08:42:39.813: INFO: kube-proxy-bootstrap-e2e-minion-group-s7dx started at 2022-11-26 08:28:22 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.813: INFO: Container kube-proxy ready: false, restart count 6 Nov 26 08:42:39.813: INFO: konnectivity-agent-m6kcz started at 2022-11-26 08:28:34 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.813: INFO: Container konnectivity-agent ready: false, restart count 5 Nov 26 08:42:39.813: INFO: netserver-2 started at 2022-11-26 08:32:54 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.813: INFO: Container webserver ready: false, restart count 4 Nov 26 08:42:39.813: INFO: csi-mockplugin-0 started at 2022-11-26 08:31:19 +0000 UTC (0+4 container statuses recorded) Nov 26 08:42:39.813: INFO: Container busybox ready: false, restart count 5 Nov 26 08:42:39.813: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 08:42:39.813: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 08:42:39.813: INFO: Container mock ready: false, restart count 5 Nov 26 08:42:39.813: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-fjbz2 started at 2022-11-26 08:39:23 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.813: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 08:42:39.813: INFO: pod-subpath-test-inlinevolume-4dbw started at 2022-11-26 08:39:23 +0000 UTC (1+2 container statuses recorded) Nov 26 08:42:39.813: INFO: Init container init-volume-inlinevolume-4dbw ready: true, restart count 2 Nov 26 08:42:39.813: INFO: Container test-container-subpath-inlinevolume-4dbw ready: false, restart count 4 Nov 26 08:42:39.813: INFO: Container test-container-volume-inlinevolume-4dbw ready: true, restart 
count 4 Nov 26 08:42:39.813: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-8q696 started at 2022-11-26 08:39:23 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.813: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 08:42:39.813: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-bb2lb started at 2022-11-26 08:38:41 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.813: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 08:42:39.813: INFO: pod-645ca7c3-0782-49c3-9f8c-fe2716c177bc started at 2022-11-26 08:39:40 +0000 UTC (0+1 container statuses recorded) Nov 26 08:42:39.813: INFO: Container write-pod ready: false, restart count 0 Nov 26 08:42:40.087: INFO: Latency metrics for node bootstrap-e2e-minion-group-s7dx [DeferCleanup (Each)] [sig-api-machinery] Servers with support for API chunking tear down framework | framework.go:193 STEP: Destroying namespace "chunking-7822" for this suite. 11/26/22 08:42:40.087
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sCronJob\sshould\snot\sschedule\sjobs\swhen\ssuspended\s\[Slow\]\s\[Conformance\]$'
test/e2e/apps/cronjob.go:111 k8s.io/kubernetes/test/e2e/apps.glob..func2.2() test/e2e/apps/cronjob.go:111 +0x376 There were additional failures detected after the initial failure: [FAILED] Nov 26 08:39:59.189: failed to list events in namespace "cronjob-1581": Get "https://34.83.96.51/api/v1/namespaces/cronjob-1581/events": dial tcp 34.83.96.51:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 08:39:59.229: Couldn't delete ns: "cronjob-1581": Delete "https://34.83.96.51/api/v1/namespaces/cronjob-1581": dial tcp 34.83.96.51:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.83.96.51/api/v1/namespaces/cronjob-1581", Err:(*net.OpError)(0xc0038ec3c0)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370
from junit_01.xml
[BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 08:37:39.691 Nov 26 08:37:39.691: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename cronjob 11/26/22 08:37:39.692 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 08:38:37.746 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 08:38:37.859 [BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 [It] should not schedule jobs when suspended [Slow] [Conformance] test/e2e/apps/cronjob.go:96 STEP: Creating a suspended cronjob 11/26/22 08:38:38.985 STEP: Ensuring no jobs are scheduled 11/26/22 08:38:39.029 STEP: Ensuring no job exists by listing jobs explicitly 11/26/22 08:39:59.07 Nov 26 08:39:59.109: INFO: Unexpected error: Failed to list the CronJobs in namespace cronjob-1581: <*url.Error | 0xc0048fe000>: { Op: "Get", URL: "https://34.83.96.51/apis/batch/v1/namespaces/cronjob-1581/jobs", Err: <*net.OpError | 0xc0038f80a0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0014f4660>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 83, 96, 51], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc0014e8020>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 08:39:59.109: FAIL: Failed to list the CronJobs in namespace cronjob-1581: Get "https://34.83.96.51/apis/batch/v1/namespaces/cronjob-1581/jobs": dial tcp 34.83.96.51:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/apps.glob..func2.2() test/e2e/apps/cronjob.go:111 +0x376 [AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 Nov 26 08:39:59.110: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 08:39:59.149 STEP: Collecting events from namespace "cronjob-1581". 
11/26/22 08:39:59.149 Nov 26 08:39:59.188: INFO: Unexpected error: failed to list events in namespace "cronjob-1581": <*url.Error | 0xc0048fe4b0>: { Op: "Get", URL: "https://34.83.96.51/api/v1/namespaces/cronjob-1581/events", Err: <*net.OpError | 0xc0038f8550>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0014f4ed0>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 83, 96, 51], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc0014e8a40>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 08:39:59.189: FAIL: failed to list events in namespace "cronjob-1581": Get "https://34.83.96.51/api/v1/namespaces/cronjob-1581/events": dial tcp 34.83.96.51:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc0016a45c0, {0xc0035ccbe0, 0xc}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc00289a9c0}, {0xc0035ccbe0, 0xc}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc0016a4650?, {0xc0035ccbe0?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc0006e9860) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc00174d3b0?, 0xc001817fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc00435b408?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc00174d3b0?, 0x29449fc?}, {0xae73300?, 0xc001817f80?, 0x2d56635?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 STEP: Destroying namespace "cronjob-1581" for this suite. 11/26/22 08:39:59.189 Nov 26 08:39:59.229: FAIL: Couldn't delete ns: "cronjob-1581": Delete "https://34.83.96.51/api/v1/namespaces/cronjob-1581": dial tcp 34.83.96.51:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.83.96.51/api/v1/namespaces/cronjob-1581", Err:(*net.OpError)(0xc0038ec3c0)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc0006e9860) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc00174d2f0?, 0xc00356cfb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc00174d2f0?, 0x0?}, {0xae73300?, 0x5?, 0xc0035cc640?}) /usr/local/go/src/reflect/value.go:368 +0xbc
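Every failure in this entry bottoms out in the same dial error, printed as <syscall.Errno>0x6f. As a reading aid only (this snippet is not part of the test suite): 0x6f is 111 decimal, which on Linux is ECONNREFUSED, i.e. the apiserver at 34.83.96.51:443 was not accepting connections. A minimal Go check of that decoding:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// 0x6f == 111 == ECONNREFUSED on Linux, matching the
	// "<syscall.Errno>0x6f" values printed in the failure dumps.
	var errno syscall.Errno = 0x6f
	fmt.Println(uint64(errno), errno == syscall.ECONNREFUSED, errno.Error())
	// On linux/amd64 this prints: 111 true connection refused
}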
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sStatefulSet\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sScaling\sshould\shappen\sin\spredictable\sorder\sand\shalt\sif\sany\sstateful\spod\sis\sunhealthy\s\[Slow\]\s\[Conformance\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0011941e0) test/e2e/framework/framework.go:241 +0x96f
from junit_01.xml
[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 08:52:45.055 Nov 26 08:52:45.055: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename statefulset 11/26/22 08:52:45.057 Nov 26 08:52:45.096: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:52:47.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:52:49.135: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:52:51.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:52:53.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:52:55.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:52:57.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:52:59.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:01.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:03.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:05.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:07.135: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:09.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:11.135: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:13.136: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:15.135: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:15.175: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused Nov 26 08:53:15.175: INFO: Unexpected error: <*errors.errorString | 0xc000195d10>: { s: "timed out waiting for the condition", } Nov 26 08:53:15.175: FAIL: timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0011941e0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-apps] StatefulSet 
test/e2e/framework/node/init/init.go:32 Nov 26 08:53:15.175: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 08:53:15.214 [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193
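The retry loop above (namespace creation attempted roughly every 2s from 08:52:45 to 08:53:15, then FAIL: timed out waiting for the condition) is the standard apimachinery polling pattern. A minimal sketch of that pattern using k8s.io/apimachinery/pkg/util/wait — illustrative only, with a hypothetical createNamespace helper, not the framework's actual code:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// createNamespace stands in for the real POST to the apiserver;
// hypothetical helper that always fails here, mimicking the dump.
func createNamespace() error {
	return fmt.Errorf("dial tcp 34.83.96.51:443: connect: connection refused")
}

func main() {
	// Poll every 2s for up to 30s. Each attempt that fails logs the
	// error and returns (false, nil), so the poll keeps retrying.
	// When the deadline passes, PollImmediate returns
	// wait.ErrWaitTimeout, whose message is exactly the
	// "timed out waiting for the condition" seen in the dump.
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		if err := createNamespace(); err != nil {
			fmt.Println("Unexpected error while creating namespace:", err)
			return false, nil // retry
		}
		return true, nil // success, stop polling
	})
	fmt.Println(err) // timed out waiting for the condition
}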
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-auth\]\sServiceAccounts\sshould\ssupport\sInClusterConfig\swith\stoken\srotation\s\[Slow\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0011943c0) test/e2e/framework/framework.go:241 +0x96f
from junit_01.xml
[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 08:42:41.068 Nov 26 08:42:41.069: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename svcaccounts 11/26/22 08:42:41.111 Nov 26 08:44:41.212: INFO: Unexpected error: <*fmt.wrapError | 0xc0047a4020>: { msg: "wait for service account \"default\" in namespace \"svcaccounts-9298\": timed out waiting for the condition", err: <*errors.errorString | 0xc0001cb9c0>{ s: "timed out waiting for the condition", }, } Nov 26 08:44:41.212: FAIL: wait for service account "default" in namespace "svcaccounts-9298": timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0011943c0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 Nov 26 08:44:41.212: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 08:45:04.229 STEP: Collecting events from namespace "svcaccounts-9298". 11/26/22 08:45:04.229 STEP: Found 0 events. 11/26/22 08:45:04.277 Nov 26 08:45:04.327: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 08:45:04.327: INFO: Nov 26 08:45:04.381: INFO: Logging node info for node bootstrap-e2e-master Nov 26 08:45:04.428: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master ba8d154d-f7d1-4d02-b950-a084eb625244 8247 0 2022-11-26 08:28:16 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 08:43:57 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.83.96.51,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bf41ff823483c389ad7dfd0c1ce16b06,SystemUUID:bf41ff82-3483-c389-ad7d-fd0c1ce16b06,BootID:5db3fc62-e7bb-4715-a04e-2bdf5328dbc8,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:45:04.429: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 08:45:04.532: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 08:45:04.581: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-master: error trying to reach service: No agent available Nov 26 08:45:04.581: INFO: Logging node info for node bootstrap-e2e-minion-group-327c Nov 26 08:45:04.632: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-327c 2792e2db-d60b-4a1a-b593-202ac7a81c7e 8328 0 2022-11-26 08:28:14 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 
kubernetes.io/hostname:bootstrap-e2e-minion-group-327c kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-327c topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-2303":"bootstrap-e2e-minion-group-327c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:38:38 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 08:43:22 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:44:32 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-327c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:24 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.233.8,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:d5273c9e67ce602a784d93fca00549e5,SystemUUID:d5273c9e-67ce-602a-784d-93fca00549e5,BootID:6b605594-03f1-4a39-9bc1-bb9fc688da43,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692,DevicePath:,},},Config:nil,},} Nov 26 08:45:04.633: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-327c Nov 26 08:45:04.723: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-327c Nov 26 08:45:04.815: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-327c: error trying to reach service: No agent available Nov 26 08:45:04.815: INFO: Logging node info for node bootstrap-e2e-minion-group-lz41 Nov 26 08:45:04.862: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-lz41 2e433af5-3311-4285-97fa-cde6a9a5b261 8330 0 2022-11-26 08:28:20 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-lz41 kubernetes.io/os:linux 
node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-lz41 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:20 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:32:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 08:43:28 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:44:32 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-lz41,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki 
BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.83.179.153,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:aa52222b7f092a36e5e364be7d47d224,SystemUUID:aa52222b-7f09-2a36-e5e3-64be7d47d224,BootID:74a831b5-c273-4958-8b03-2d43808117f5,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9 kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},},Config:nil,},} Nov 26 08:45:04.863: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-lz41 Nov 26 08:45:04.935: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-lz41 Nov 26 08:45:05.096: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-lz41: error trying to reach service: No agent available Nov 26 08:45:05.096: INFO: Logging node info for node bootstrap-e2e-minion-group-s7dx Nov 26 08:45:05.148: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-s7dx 94f7d8ed-1dd6-4a3f-9454-b3d54cd8c750 8308 0 2022-11-26 08:28:22 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-s7dx kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-s7dx topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:23 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:33:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 08:43:30 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:44:23 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-s7dx,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 
DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.127.51.136,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7318864e323f36191a9cc6aee4e5582d,SystemUUID:7318864e-323f-3619-1a9c-c6aee4e5582d,BootID:247dfbca-e301-45ca-b5e7-bc2da79a6926,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:45:05.148: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-s7dx Nov 26 08:45:05.202: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-s7dx Nov 26 08:45:05.248: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-s7dx: error trying to reach service: No agent available [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 STEP: Destroying namespace "svcaccounts-9298" for this suite. 11/26/22 08:45:05.248
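The recurring "Unable to retrieve kubelet pods for node ...: error trying to reach service: No agent available" lines come from the namespace-dump step, which reads each node's pod list through the apiserver's node proxy subresource (GET /api/v1/nodes/<node>/proxy/pods). On this cluster that path runs through the konnectivity tunnel (note the kas-network-proxy/proxy-server and proxy-agent images in the node dumps), and "No agent available" is the proxy server reporting that no agent is connected. A minimal client-go sketch of the same read, illustrative only, with error handling trimmed:

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// GET /api/v1/nodes/<name>/proxy/pods -- the same kubelet read the
	// namespace dump performs. With the konnectivity agent down, this
	// fails with "error trying to reach service: No agent available".
	raw, err := client.CoreV1().RESTClient().Get().
		Resource("nodes").
		Name("bootstrap-e2e-minion-group-s7dx").
		SubResource("proxy").
		Suffix("pods").
		Do(context.TODO()).Raw()
	if err != nil {
		fmt.Println("unable to retrieve kubelet pods:", err)
		return
	}
	fmt.Printf("kubelet reports %d bytes of pod data\n", len(raw))
}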
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\sclient\sSimple\spod\sshould\sreturn\scommand\sexit\scodes\s\[Slow\]\srunning\sa\sfailing\scommand\swithout\s\-\-restart\=Never\,\sbut\swith\s\-\-rm$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc00126e2d0) test/e2e/framework/framework.go:241 +0x96f (from junit_01.xml)
[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 08:42:41.098 Nov 26 08:42:41.099: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename kubectl 11/26/22 08:42:41.101 Nov 26 08:44:41.212: INFO: Unexpected error: <*fmt.wrapError | 0xc0003ec000>: { msg: "wait for service account \"default\" in namespace \"kubectl-9225\": timed out waiting for the condition", err: <*errors.errorString | 0xc0001c9a00>{ s: "timed out waiting for the condition", }, } Nov 26 08:44:41.213: FAIL: wait for service account "default" in namespace "kubectl-9225": timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc00126e2d0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 Nov 26 08:44:41.213: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 08:45:04.221 STEP: Collecting events from namespace "kubectl-9225". 11/26/22 08:45:04.221 STEP: Found 0 events. 11/26/22 08:45:04.263 Nov 26 08:45:04.307: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 08:45:04.307: INFO: Nov 26 08:45:04.353: INFO: Logging node info for node bootstrap-e2e-master Nov 26 08:45:04.400: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master ba8d154d-f7d1-4d02-b950-a084eb625244 8247 0 2022-11-26 08:28:16 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 08:43:57 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.83.96.51,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bf41ff823483c389ad7dfd0c1ce16b06,SystemUUID:bf41ff82-3483-c389-ad7d-fd0c1ce16b06,BootID:5db3fc62-e7bb-4715-a04e-2bdf5328dbc8,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:45:04.400: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 08:45:04.462: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 08:45:04.564: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-master: error trying to reach service: No agent available Nov 26 08:45:04.564: INFO: Logging node info for node bootstrap-e2e-minion-group-327c Nov 26 08:45:04.617: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-327c 2792e2db-d60b-4a1a-b593-202ac7a81c7e 8328 0 2022-11-26 08:28:14 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 
kubernetes.io/hostname:bootstrap-e2e-minion-group-327c kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-327c topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-2303":"bootstrap-e2e-minion-group-327c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:38:38 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 08:43:22 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:44:32 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-327c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:24 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.233.8,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:d5273c9e67ce602a784d93fca00549e5,SystemUUID:d5273c9e-67ce-602a-784d-93fca00549e5,BootID:6b605594-03f1-4a39-9bc1-bb9fc688da43,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692,DevicePath:,},},Config:nil,},} Nov 26 08:45:04.618: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-327c Nov 26 08:45:04.688: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-327c Nov 26 08:45:04.734: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-327c: error trying to reach service: No agent available Nov 26 08:45:04.734: INFO: Logging node info for node bootstrap-e2e-minion-group-lz41 Nov 26 08:45:04.780: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-lz41 2e433af5-3311-4285-97fa-cde6a9a5b261 8330 0 2022-11-26 08:28:20 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-lz41 kubernetes.io/os:linux 
node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-lz41 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:20 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:32:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 08:43:28 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:44:32 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-lz41,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki 
BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.83.179.153,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:aa52222b7f092a36e5e364be7d47d224,SystemUUID:aa52222b-7f09-2a36-e5e3-64be7d47d224,BootID:74a831b5-c273-4958-8b03-2d43808117f5,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9 kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},},Config:nil,},} Nov 26 08:45:04.780: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-lz41 Nov 26 08:45:04.832: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-lz41 Nov 26 08:45:04.878: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-lz41: error trying to reach service: No agent available Nov 26 08:45:04.878: INFO: Logging node info for node bootstrap-e2e-minion-group-s7dx Nov 26 08:45:04.923: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-s7dx 94f7d8ed-1dd6-4a3f-9454-b3d54cd8c750 8308 0 2022-11-26 08:28:22 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-s7dx kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-s7dx topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:23 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:33:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 08:43:30 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:44:23 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-s7dx,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 
DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.127.51.136,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7318864e323f36191a9cc6aee4e5582d,SystemUUID:7318864e-323f-3619-1a9c-c6aee4e5582d,BootID:247dfbca-e301-45ca-b5e7-bc2da79a6926,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:45:04.923: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-s7dx Nov 26 08:45:05.140: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-s7dx Nov 26 08:45:05.217: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-s7dx: error trying to reach service: No agent available [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 STEP: Destroying namespace "kubectl-9225" for this suite. 11/26/22 08:45:05.217
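Each failing spec in this report stops in the same place: framework.(*Framework).BeforeEach at test/e2e/framework/framework.go:241, waiting for the "default" ServiceAccount in the freshly created namespace while the apiserver at 34.83.96.51 refuses connections. The "timed out waiting for the condition" text is the generic error the k8s.io/apimachinery wait package returns when a poll deadline expires, which the framework then wraps with the service-account context. A rough sketch of that kind of wait using client-go, not the framework's exact implementation (the 2s/2m poll values here are assumptions):

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForDefaultServiceAccount polls until the "default" ServiceAccount
// exists in ns, mirroring the condition BeforeEach waits on. Apiserver
// connection errors are swallowed and retried, so a dead apiserver
// surfaces only as the final timeout error.
func waitForDefaultServiceAccount(client kubernetes.Interface, ns string) error {
	return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		_, err := client.CoreV1().ServiceAccounts(ns).Get(context.TODO(), "default", metav1.GetOptions{})
		if err != nil {
			return false, nil // retry: includes "connection refused" from the apiserver
		}
		return true, nil
	})
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)
	if err := waitForDefaultServiceAccount(client, "kubectl-9225"); err != nil {
		// On a run like this one, the bare error is
		// "timed out waiting for the condition".
		fmt.Println(err)
	}
}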
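For the kubectl exit-code spec above, the scenario being verified is straightforward to reproduce once the cluster is healthy: run a failing command through kubectl run with --rm set and --restart left at something other than Never, then inspect the exit status kubectl itself reports. A hedged reproduction via os/exec — the pod name is arbitrary, the image is one already present on the nodes, and the expected code is whatever the test asserts, which this sketch does not claim to know:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// Mirrors the tested scenario: a failing command, --rm set,
	// --restart left at a value other than Never.
	cmd := exec.Command("kubectl", "run", "exit-code-demo",
		"-i", "--rm", "--restart=OnFailure",
		"--image=registry.k8s.io/e2e-test-images/busybox:1.29-4",
		"--", "sh", "-c", "exit 42")
	err := cmd.Run()

	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		// The test's assertion is about this value.
		fmt.Println("kubectl exited with code", exitErr.ExitCode())
	} else if err != nil {
		fmt.Println("kubectl did not run:", err)
	}
}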
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cloud\-provider\-gcp\]\sAddon\supdate\sshould\spropagate\sadd\-on\sfile\schanges\s\[Slow\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc001221d10) test/e2e/framework/framework.go:241 +0x96f (from junit_01.xml)
[BeforeEach] [sig-cloud-provider-gcp] Addon update set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 08:42:41.162 Nov 26 08:42:41.162: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename addon-update-test 11/26/22 08:42:41.165 Nov 26 08:44:41.230: INFO: Unexpected error: <*fmt.wrapError | 0xc0008fc600>: { msg: "wait for service account \"default\" in namespace \"addon-update-test-8846\": timed out waiting for the condition", err: <*errors.errorString | 0xc000205d60>{ s: "timed out waiting for the condition", }, } Nov 26 08:44:41.230: FAIL: wait for service account "default" in namespace "addon-update-test-8846": timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc001221d10) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-cloud-provider-gcp] Addon update test/e2e/framework/node/init/init.go:32 Nov 26 08:44:41.231: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-cloud-provider-gcp] Addon update test/e2e/cloud/gcp/addon_update.go:237 [DeferCleanup (Each)] [sig-cloud-provider-gcp] Addon update dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 08:45:04.215 STEP: Collecting events from namespace "addon-update-test-8846". 11/26/22 08:45:04.215 STEP: Found 0 events. 11/26/22 08:45:04.256 Nov 26 08:45:04.297: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 08:45:04.297: INFO: Nov 26 08:45:04.339: INFO: Logging node info for node bootstrap-e2e-master Nov 26 08:45:04.396: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master ba8d154d-f7d1-4d02-b950-a084eb625244 8247 0 2022-11-26 08:28:16 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 08:43:57 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:43:57 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.83.96.51,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bf41ff823483c389ad7dfd0c1ce16b06,SystemUUID:bf41ff82-3483-c389-ad7d-fd0c1ce16b06,BootID:5db3fc62-e7bb-4715-a04e-2bdf5328dbc8,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:45:04.396: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 08:45:04.445: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 08:45:04.492: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-master: error trying to reach service: No agent available Nov 26 08:45:04.492: INFO: Logging node info for node bootstrap-e2e-minion-group-327c Nov 26 08:45:04.553: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-327c 2792e2db-d60b-4a1a-b593-202ac7a81c7e 8328 0 2022-11-26 08:28:14 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 
kubernetes.io/hostname:bootstrap-e2e-minion-group-327c kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-327c topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-2303":"bootstrap-e2e-minion-group-327c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:38:38 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 08:43:22 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:44:32 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-327c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:22 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:24 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:43:02 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.233.8,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:d5273c9e67ce602a784d93fca00549e5,SystemUUID:d5273c9e-67ce-602a-784d-93fca00549e5,BootID:6b605594-03f1-4a39-9bc1-bb9fc688da43,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692,DevicePath:,},},Config:nil,},} Nov 26 08:45:04.554: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-327c Nov 26 08:45:04.610: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-327c Nov 26 08:45:04.654: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-327c: error trying to reach service: No agent available Nov 26 08:45:04.654: INFO: Logging node info for node bootstrap-e2e-minion-group-lz41 Nov 26 08:45:04.700: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-lz41 2e433af5-3311-4285-97fa-cde6a9a5b261 8330 0 2022-11-26 08:28:20 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-lz41 kubernetes.io/os:linux 
node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-lz41 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:20 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:32:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 08:43:28 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:44:32 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-lz41,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki 
BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:43:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:44:32 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.83.179.153,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:aa52222b7f092a36e5e364be7d47d224,SystemUUID:aa52222b-7f09-2a36-e5e3-64be7d47d224,BootID:74a831b5-c273-4958-8b03-2d43808117f5,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9 kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},},Config:nil,},} Nov 26 08:45:04.701: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-lz41 Nov 26 08:45:04.776: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-lz41 Nov 26 08:45:04.842: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-lz41: error trying to reach service: No agent available Nov 26 08:45:04.842: INFO: Logging node info for node bootstrap-e2e-minion-group-s7dx Nov 26 08:45:04.893: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-s7dx 94f7d8ed-1dd6-4a3f-9454-b3d54cd8c750 8308 0 2022-11-26 08:28:22 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-s7dx kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-s7dx topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:23 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:33:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 08:43:30 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:44:23 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-s7dx,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 
DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:43:30 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:44:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.127.51.136,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7318864e323f36191a9cc6aee4e5582d,SystemUUID:7318864e-323f-3619-1a9c-c6aee4e5582d,BootID:247dfbca-e301-45ca-b5e7-bc2da79a6926,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:45:04.893: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-s7dx Nov 26 08:45:05.141: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-s7dx Nov 26 08:45:05.211: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-s7dx: error trying to reach service: No agent available [DeferCleanup (Each)] [sig-cloud-provider-gcp] Addon update tear down framework | framework.go:193 STEP: Destroying namespace "addon-update-test-8846" for this suite. 11/26/22 08:45:05.211
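Before the namespace dump above, this test had already failed in BeforeEach at framework.go:241: the two-minute wait (08:42:41 to 08:44:41) for the "default" ServiceAccount in "addon-update-test-8846" expired while the apiserver was unreachable. The following is a minimal standalone sketch of that kind of wait using client-go; the polling interval, timeout, and main-function wiring are illustrative assumptions, not the framework's exact code.

package main

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForDefaultServiceAccount polls until the "default" ServiceAccount
// exists in ns -- the condition the e2e framework waits on in BeforeEach.
// On timeout the wrapped error reads like the failure above:
//   wait for service account "default" in namespace ...: timed out waiting for the condition
// The 2s/2m values are illustrative assumptions.
func waitForDefaultServiceAccount(cs kubernetes.Interface, ns string) error {
	err := wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		_, getErr := cs.CoreV1().ServiceAccounts(ns).Get(context.TODO(), "default", metav1.GetOptions{})
		if getErr == nil {
			return true, nil
		}
		if apierrors.IsNotFound(getErr) {
			return false, nil // not created yet; keep polling
		}
		return false, nil // transient API errors (e.g. connection refused) are retried too
	})
	if err != nil {
		return fmt.Errorf("wait for service account %q in namespace %q: %w", "default", ns, err)
	}
	return nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	if err := waitForDefaultServiceAccount(kubernetes.NewForConfigOrDie(cfg), "addon-update-test-8846"); err != nil {
		fmt.Println(err)
	}
}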
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\sonly\starget\snodes\swith\sendpoints$'
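(The focus pattern above is the regex-escaped form of the test name "Kubernetes e2e suite [It] [sig-network] LoadBalancers ESIPP [Slow] should only target nodes with endpoints".)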
test/e2e/framework/service/util.go:48 k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTPWithRetriableErrorCodes({0xc00383a250, 0xd}, 0x1f91, {0xae73300, 0x0, 0x0}, 0xc003641de8?) test/e2e/framework/service/util.go:48 +0x265 k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTP(...) test/e2e/framework/service/util.go:29 k8s.io/kubernetes/test/e2e/network.glob..func20.5() test/e2e/network/loadbalancer.go:1404 +0x737
from junit_01.xml
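The trace points at service.TestReachableHTTPWithRetriableErrorCodes, which repeatedly "pokes" the load balancer URL until it answers or an overall deadline expires; in the log below every poke fails with either "connection refused" or a per-request client timeout. A rough self-contained sketch of such a poke loop follows (the 2s cadence and 10s per-poke timeout are read off the log timestamps; names and the overall deadline are illustrative assumptions, not the framework's implementation).

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// pokeUntilReachable GETs url with a short per-request timeout, treats
// connection refused and client timeouts as retriable, and gives up after
// an overall deadline -- mirroring the Poking/Poke lines in the log below.
func pokeUntilReachable(url string, overall time.Duration) error {
	client := &http.Client{Timeout: 10 * time.Second} // per-poke timeout (assumption)
	deadline := time.Now().Add(overall)
	for time.Now().Before(deadline) {
		fmt.Printf("Poking %q\n", url)
		resp, err := client.Get(url)
		if err != nil {
			// e.g. "connect: connection refused" or
			// "context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
			fmt.Printf("Poke(%q): %v\n", url, err)
			time.Sleep(2 * time.Second)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			fmt.Printf("Poke(%q): %s\n", url, body)
			return nil
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("%q not reachable within %v", url, overall)
}

func main() {
	if err := pokeUntilReachable("http://34.105.106.64:8081/echo?msg=hello", 5*time.Minute); err != nil {
		fmt.Println(err)
	}
}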
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 08:48:35.372 Nov 26 08:48:35.372: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 08:48:35.373 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 08:48:35.614 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 08:48:35.732 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1250 [It] should only target nodes with endpoints test/e2e/network/loadbalancer.go:1346 STEP: creating a service esipp-7299/external-local-nodes with type=LoadBalancer 11/26/22 08:48:36.183 STEP: setting ExternalTrafficPolicy=Local 11/26/22 08:48:36.183 STEP: waiting for loadbalancer for service esipp-7299/external-local-nodes 11/26/22 08:48:36.76 Nov 26 08:48:36.760: INFO: Waiting up to 15m0s for service "external-local-nodes" to have a LoadBalancer STEP: waiting for loadbalancer for service esipp-7299/external-local-nodes 11/26/22 08:49:14.92 Nov 26 08:49:14.921: INFO: Waiting up to 15m0s for service "external-local-nodes" to have a LoadBalancer STEP: Performing setup for networking test in namespace esipp-7299 11/26/22 08:49:15.002 STEP: creating a selector 11/26/22 08:49:15.003 STEP: Creating the service pods in kubernetes 11/26/22 08:49:15.003 Nov 26 08:49:15.003: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable Nov 26 08:49:15.379: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "esipp-7299" to be "running and ready" Nov 26 08:49:15.431: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 51.943068ms Nov 26 08:49:15.431: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 26 08:49:17.523: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.144505612s Nov 26 08:49:17.523: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:19.506: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.127619919s Nov 26 08:49:19.506: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:21.483: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.104488319s Nov 26 08:49:21.483: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:23.496: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.117346281s Nov 26 08:49:23.496: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:25.504: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.125510317s Nov 26 08:49:25.504: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:27.521: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.142604198s Nov 26 08:49:27.521: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:29.488: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.109664081s Nov 26 08:49:29.488: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:31.489: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 16.109909814s Nov 26 08:49:31.489: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:33.539: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.159938307s Nov 26 08:49:33.539: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:35.491: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.112573672s Nov 26 08:49:35.491: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:49:37.507: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 22.12858745s Nov 26 08:49:37.507: INFO: The phase of Pod netserver-0 is Running (Ready = true) Nov 26 08:49:37.507: INFO: Pod "netserver-0" satisfied condition "running and ready" Nov 26 08:49:37.600: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "esipp-7299" to be "running and ready" Nov 26 08:49:37.658: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 58.333191ms Nov 26 08:49:37.658: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:39.718: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2.117934135s Nov 26 08:49:39.718: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:41.714: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 4.11399017s Nov 26 08:49:41.714: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:43.713: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 6.113003605s Nov 26 08:49:43.713: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:45.707: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 8.106626153s Nov 26 08:49:45.707: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:47.716: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 10.116495277s Nov 26 08:49:47.717: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:49.725: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 12.125107716s Nov 26 08:49:49.725: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:51.725: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 14.124742836s Nov 26 08:49:51.725: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:53.740: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 16.140204802s Nov 26 08:49:53.740: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:55.733: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 18.133073782s Nov 26 08:49:55.733: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 08:49:57.738: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 20.138335645s Nov 26 08:49:57.738: INFO: The phase of Pod netserver-1 is Running (Ready = true) Nov 26 08:49:57.738: INFO: Pod "netserver-1" satisfied condition "running and ready" Nov 26 08:49:57.830: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "esipp-7299" to be "running and ready" Nov 26 08:49:57.919: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 88.933907ms Nov 26 08:49:57.919: INFO: The phase of Pod netserver-2 is Running (Ready = true) Nov 26 08:49:57.919: INFO: Pod "netserver-2" satisfied condition "running and ready" STEP: Creating test pods 11/26/22 08:49:58.01 Nov 26 08:49:58.258: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "esipp-7299" to be "running" Nov 26 08:49:58.333: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 74.545549ms Nov 26 08:50:00.388: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.129171129s Nov 26 08:50:00.388: INFO: Pod "test-container-pod" satisfied condition "running" Nov 26 08:50:00.440: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 STEP: Getting node addresses 11/26/22 08:50:00.44 Nov 26 08:50:00.440: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable STEP: Creating the service on top of the pods in kubernetes 11/26/22 08:50:00.6 Nov 26 08:50:00.785: INFO: Service node-port-service in namespace esipp-7299 found. Nov 26 08:50:01.038: INFO: Service session-affinity-service in namespace esipp-7299 found. STEP: Waiting for NodePort service to expose endpoint 11/26/22 08:50:01.137 Nov 26 08:50:02.138: INFO: Waiting for amount of service:node-port-service endpoints to be 3 STEP: Waiting for Session Affinity service to expose endpoint 11/26/22 08:50:02.346 Nov 26 08:50:03.347: INFO: Waiting for amount of service:session-affinity-service endpoints to be 3 STEP: creating a pod to be part of the service external-local-nodes on node bootstrap-e2e-minion-group-327c 11/26/22 08:50:03.437 Nov 26 08:50:03.524: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 08:50:03.583: INFO: Found all 1 pods Nov 26 08:50:03.583: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [external-local-nodes-rdhj6] Nov 26 08:50:03.583: INFO: Waiting up to 2m0s for pod "external-local-nodes-rdhj6" in namespace "esipp-7299" to be "running and ready" Nov 26 08:50:03.703: INFO: Pod "external-local-nodes-rdhj6": Phase="Pending", Reason="", readiness=false. Elapsed: 120.034866ms Nov 26 08:50:03.703: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodes-rdhj6' on 'bootstrap-e2e-minion-group-327c' to be 'Running' but was 'Pending' Nov 26 08:50:05.816: INFO: Pod "external-local-nodes-rdhj6": Phase="Running", Reason="", readiness=false. Elapsed: 2.23322911s Nov 26 08:50:05.816: INFO: Error evaluating pod condition running and ready: pod 'external-local-nodes-rdhj6' on 'bootstrap-e2e-minion-group-327c' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:50:03 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:50:05 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:50:05 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:50:03 +0000 UTC }] Nov 26 08:50:07.761: INFO: Pod "external-local-nodes-rdhj6": Phase="Running", Reason="", readiness=true. Elapsed: 4.178574827s Nov 26 08:50:07.761: INFO: Pod "external-local-nodes-rdhj6" satisfied condition "running and ready" Nov 26 08:50:07.761: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [external-local-nodes-rdhj6] STEP: waiting for service endpoint on node bootstrap-e2e-minion-group-327c 11/26/22 08:50:07.761 Nov 26 08:50:07.847: INFO: Pod for service esipp-7299/external-local-nodes is on node bootstrap-e2e-minion-group-327c Nov 26 08:50:07.847: INFO: Poking "http://34.105.106.64:8081/echo?msg=hello" Nov 26 08:50:07.887: INFO: Poke("http://34.105.106.64:8081/echo?msg=hello"): Get "http://34.105.106.64:8081/echo?msg=hello": dial tcp 34.105.106.64:8081: connect: connection refused
[log condensed: from 08:50:09 through 08:53:35 the test re-poked "http://34.105.106.64:8081/echo?msg=hello" roughly every 2s; every attempt failed, most with "dial tcp 34.105.106.64:8081: connect: connection refused" and the rest, after a 10s hang, with "context deadline exceeded (Client.Timeout exceeded while awaiting headers)"]
------------------------------ Progress Report for Ginkgo Process #21 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should only target nodes with endpoints (Spec Runtime: 5m0.656s) test/e2e/network/loadbalancer.go:1346 In [It] (Node Runtime: 5m0.001s) test/e2e/network/loadbalancer.go:1346 At [By Step] waiting for service endpoint on node bootstrap-e2e-minion-group-327c (Step Runtime: 3m28.267s) test/e2e/network/loadbalancer.go:1395 Spec Goroutine goroutine 3601 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc003b92f48, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x50?, 0x2fd9d05?, 0x28?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x2fdaaaa?, 0xc002f7dca0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x2fdaa30?, 0x7fe0bc8?, 0xc0000820c8?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTPWithRetriableErrorCodes({0xc00383a250, 0xd}, 0x1f91, {0xae73300, 0x0, 0x0}, 0xc003641de8?) test/e2e/framework/service/util.go:46 k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTP(...) test/e2e/framework/service/util.go:29 > k8s.io/kubernetes/test/e2e/network.glob..func20.5() test/e2e/network/loadbalancer.go:1404 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00366a000}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------
Nov 26 08:53:37.888: INFO: Poking "http://34.105.106.64:8081/echo?msg=hello" Nov 26 08:53:37.928: INFO: Poke("http://34.105.106.64:8081/echo?msg=hello"): Get "http://34.105.106.64:8081/echo?msg=hello": dial tcp 34.105.106.64:8081: connect: connection refused
[log condensed: the same poke kept failing through 08:54:33, and two further Progress Reports for Ginkgo Process #21 (Spec Runtime 5m20.658s and 5m40.661s) captured spec goroutine 3601 again, this time blocked in net/http.(*Transport).getConn beneath network.httpGetNoConnectionPoolTimeout({...}, 0x2540be400) and network.PokeHTTP, called from service.TestReachableHTTPWithRetriableErrorCodes]
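The getConn frames in those reports reflect the framework deliberately avoiding HTTP keep-alive: each poke builds a throwaway transport so a pooled connection can never mask a dead endpoint, and the 0x2540be400 argument to httpGetNoConnectionPoolTimeout is 10,000,000,000 ns, i.e. the 10s per-request budget behind every "Client.Timeout exceeded while awaiting headers" line. A rough stdlib-only sketch of that idea (not the framework's actual implementation in test/e2e/framework/network/utils.go):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// getNoConnectionPool gives every probe its own Transport with
// keep-alives disabled, so no reused connection can hide a dead
// endpoint, and bounds the whole request with one timeout.
func getNoConnectionPool(url string, timeout time.Duration) (*http.Response, error) {
	tr := &http.Transport{
		DisableKeepAlives: true, // never reuse a connection between pokes
	}
	client := &http.Client{
		Transport: tr,
		Timeout:   timeout, // surfaces as "Client.Timeout exceeded while awaiting headers"
	}
	return client.Get(url)
}

func main() {
	// 10s matches the 0x2540be400 ns seen in the stack traces above.
	resp, err := getNoConnectionPool("http://34.105.106.64:8081/echo?msg=hello", 10*time.Second)
	if err != nil {
		fmt.Println(err) // "connection refused" or a 10s timeout, as in the log
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}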
[log condensed: pokes from 08:54:43 through 08:55:07 failed the same way, and three more Progress Reports for Ginkgo Process #21 (Spec Runtime 6m0.664s, 6m20.667s and 6m40.669s, Step Runtime up to 5m8.28s on "waiting for service endpoint on node bootstrap-e2e-minion-group-327c") printed the identical goroutine 3601 stack beneath service.TestReachableHTTP]
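Before the final FAIL below, a note on what this spec exercises: "should only target nodes with endpoints" creates a LoadBalancer Service with externalTrafficPolicy: Local, under which kube-proxy answers for the service only on nodes that host a ready local endpoint; other nodes typically refuse the connection. Since the only backing pod's netexec container was crash-looping (see the BackOff events further down), even the endpoint node had nothing ready to answer, which is consistent with the unbroken run of refusals above. A hedged client-go sketch of such a Service, with the ports mirroring the describe output below; the selector value is illustrative, and older releases of k8s.io/api name the constant ServiceExternalTrafficPolicyTypeLocal:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// externalLocalService builds the kind of Service this spec tests:
// a LoadBalancer whose external traffic policy is Local, so only
// nodes actually hosting a ready endpoint accept external traffic.
func externalLocalService() *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "external-local-nodes", Namespace: "esipp-7299"},
		Spec: v1.ServiceSpec{
			Type:                  v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
			Selector:              map[string]string{"testid": "external-local-nodes"}, // illustrative value
			Ports: []v1.ServicePort{{
				Protocol:   v1.ProtocolTCP,
				Port:       8081,               // port poked by the test
				TargetPort: intstr.FromInt(80), // as in the describe output below
			}},
		},
	}
}

func main() {
	svc := externalLocalService()
	fmt.Println(svc.Name, "externalTrafficPolicy:", svc.Spec.ExternalTrafficPolicy)
}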
Nov 26 08:55:17.929: INFO: Poke("http://34.105.106.64:8081/echo?msg=hello"): Get "http://34.105.106.64:8081/echo?msg=hello": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:55:17.929: FAIL: Could not reach HTTP service through 34.105.106.64:8081 after 5m0s Full Stack Trace k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTPWithRetriableErrorCodes({0xc00383a250, 0xd}, 0x1f91, {0xae73300, 0x0, 0x0}, 0xc003641de8?) test/e2e/framework/service/util.go:48 +0x265 k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTP(...) test/e2e/framework/service/util.go:29 k8s.io/kubernetes/test/e2e/network.glob..func20.5() test/e2e/network/loadbalancer.go:1404 +0x737 Nov 26 08:55:18.108: INFO: Waiting up to 15m0s for service "external-local-nodes" to have no LoadBalancer [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 08:55:28.400: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 Nov 26 08:55:28.483: INFO: Output of kubectl describe svc: Nov 26 08:55:28.483: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-7299 describe svc --namespace=esipp-7299' Nov 26 08:55:29.052: INFO: stderr: "" Nov 26 08:55:29.052: INFO: stdout: "Name: external-local-nodes\nNamespace: esipp-7299\nLabels: testid=external-local-nodes-50b4a8d6-1a3a-4d11-90cf-4283c4345cf9\nAnnotations: <none>\nSelector: testid=external-local-nodes-50b4a8d6-1a3a-4d11-90cf-4283c4345cf9\nType: ClusterIP\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.142.204\nIPs: 10.0.142.204\nPort: <unset> 8081/TCP\nTargetPort: 80/TCP\nEndpoints: \nSession Affinity: None\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal EnsuringLoadBalancer 6m52s service-controller Ensuring load balancer\n Normal EnsuredLoadBalancer 6m15s service-controller Ensured load balancer\n\n\nName: node-port-service\nNamespace: esipp-7299\nLabels: <none>\nAnnotations: <none>\nSelector: selector-e6fadc6b-1d42-4cb0-97ed-b96df4d240ea=true\nType: NodePort\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.24.167\nIPs: 10.0.24.167\nPort: http 80/TCP\nTargetPort: 8083/TCP\nNodePort: http 32139/TCP\nEndpoints: 10.64.0.222:8083,10.64.2.190:8083,10.64.3.185:8083\nPort: udp 90/UDP\nTargetPort: 8081/UDP\nNodePort: udp 30827/UDP\nEndpoints: 10.64.0.222:8081,10.64.2.190:8081,10.64.3.185:8081\nSession Affinity: None\nExternal Traffic Policy: Cluster\nEvents: <none>\n\n\nName: session-affinity-service\nNamespace: esipp-7299\nLabels: <none>\nAnnotations: <none>\nSelector: selector-e6fadc6b-1d42-4cb0-97ed-b96df4d240ea=true\nType: NodePort\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.77.162\nIPs: 10.0.77.162\nPort: http 
80/TCP\nTargetPort: 8083/TCP\nNodePort: http 31856/TCP\nEndpoints: 10.64.0.222:8083,10.64.2.190:8083,10.64.3.185:8083\nPort: udp 90/UDP\nTargetPort: 8081/UDP\nNodePort: udp 30226/UDP\nEndpoints: 10.64.0.222:8081,10.64.2.190:8081,10.64.3.185:8081\nSession Affinity: ClientIP\nExternal Traffic Policy: Cluster\nEvents: <none>\n" Nov 26 08:55:29.052: INFO: Name: external-local-nodes Namespace: esipp-7299 Labels: testid=external-local-nodes-50b4a8d6-1a3a-4d11-90cf-4283c4345cf9 Annotations: <none> Selector: testid=external-local-nodes-50b4a8d6-1a3a-4d11-90cf-4283c4345cf9 Type: ClusterIP IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.142.204 IPs: 10.0.142.204 Port: <unset> 8081/TCP TargetPort: 80/TCP Endpoints: Session Affinity: None Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal EnsuringLoadBalancer 6m52s service-controller Ensuring load balancer Normal EnsuredLoadBalancer 6m15s service-controller Ensured load balancer Name: node-port-service Namespace: esipp-7299 Labels: <none> Annotations: <none> Selector: selector-e6fadc6b-1d42-4cb0-97ed-b96df4d240ea=true Type: NodePort IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.24.167 IPs: 10.0.24.167 Port: http 80/TCP TargetPort: 8083/TCP NodePort: http 32139/TCP Endpoints: 10.64.0.222:8083,10.64.2.190:8083,10.64.3.185:8083 Port: udp 90/UDP TargetPort: 8081/UDP NodePort: udp 30827/UDP Endpoints: 10.64.0.222:8081,10.64.2.190:8081,10.64.3.185:8081 Session Affinity: None External Traffic Policy: Cluster Events: <none> Name: session-affinity-service Namespace: esipp-7299 Labels: <none> Annotations: <none> Selector: selector-e6fadc6b-1d42-4cb0-97ed-b96df4d240ea=true Type: NodePort IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.77.162 IPs: 10.0.77.162 Port: http 80/TCP TargetPort: 8083/TCP NodePort: http 31856/TCP Endpoints: 10.64.0.222:8083,10.64.2.190:8083,10.64.3.185:8083 Port: udp 90/UDP TargetPort: 8081/UDP NodePort: udp 30226/UDP Endpoints: 10.64.0.222:8081,10.64.2.190:8081,10.64.3.185:8081 Session Affinity: ClientIP External Traffic Policy: Cluster Events: <none> [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 08:55:29.053 STEP: Collecting events from namespace "esipp-7299". 11/26/22 08:55:29.053 STEP: Found 37 events. 
11/26/22 08:55:29.098 Nov 26 08:55:29.098: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for netserver-0: { } Scheduled: Successfully assigned esipp-7299/netserver-0 to bootstrap-e2e-minion-group-327c Nov 26 08:55:29.099: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for netserver-1: { } Scheduled: Successfully assigned esipp-7299/netserver-1 to bootstrap-e2e-minion-group-lz41 Nov 26 08:55:29.099: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for netserver-2: { } Scheduled: Successfully assigned esipp-7299/netserver-2 to bootstrap-e2e-minion-group-s7dx Nov 26 08:55:29.099: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for test-container-pod: { } Scheduled: Successfully assigned esipp-7299/test-container-pod to bootstrap-e2e-minion-group-lz41 Nov 26 08:55:29.099: INFO: At 2022-11-26 08:48:36 +0000 UTC - event for external-local-nodes: {service-controller } EnsuringLoadBalancer: Ensuring load balancer Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:13 +0000 UTC - event for external-local-nodes: {service-controller } EnsuredLoadBalancer: Ensured load balancer Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:16 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-327c} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:16 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-327c} Started: Started container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:16 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-327c} Created: Created container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:16 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-lz41} FailedMount: MountVolume.SetUp failed for volume "kube-api-access-c54wj" : failed to sync configmap cache: timed out waiting for the condition Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:16 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-s7dx} Started: Started container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:16 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-s7dx} Created: Created container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:16 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-s7dx} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:17 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-327c} Killing: Stopping container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:17 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-lz41} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:17 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-lz41} Created: Created container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:17 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-lz41} Started: Started container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:17 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-lz41} Killing: Stopping container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:18 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-327c} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:18 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-lz41} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:18 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-s7dx} Killing: Stopping container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:19 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-s7dx} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:23 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-lz41} BackOff: Back-off restarting failed container webserver in pod netserver-1_esipp-7299(7327381b-4c99-4aa5-8226-03e31ff559d1) Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:37 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-s7dx} BackOff: Back-off restarting failed container webserver in pod netserver-2_esipp-7299(45a0acc6-25f1-4c62-a11a-43d99a331523) Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:59 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-lz41} Started: Started container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:59 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-lz41} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 08:55:29.099: INFO: At 2022-11-26 08:49:59 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-lz41} Created: Created container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:50:00 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-lz41} Killing: Stopping container webserver Nov 26 08:55:29.099: INFO: At 2022-11-26 08:50:01 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-lz41} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Nov 26 08:55:29.099: INFO: At 2022-11-26 08:50:03 +0000 UTC - event for external-local-nodes: {replication-controller } SuccessfulCreate: Created pod: external-local-nodes-rdhj6 Nov 26 08:55:29.099: INFO: At 2022-11-26 08:50:04 +0000 UTC - event for external-local-nodes-rdhj6: {kubelet bootstrap-e2e-minion-group-327c} Started: Started container netexec Nov 26 08:55:29.099: INFO: At 2022-11-26 08:50:04 +0000 UTC - event for external-local-nodes-rdhj6: {kubelet bootstrap-e2e-minion-group-327c} Killing: Stopping container netexec Nov 26 08:55:29.099: INFO: At 2022-11-26 08:50:04 +0000 UTC - event for external-local-nodes-rdhj6: {kubelet bootstrap-e2e-minion-group-327c} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 08:55:29.099: INFO: At 2022-11-26 08:50:04 +0000 UTC - event for external-local-nodes-rdhj6: {kubelet bootstrap-e2e-minion-group-327c} Created: Created container netexec Nov 26 08:55:29.099: INFO: At 2022-11-26 08:50:05 +0000 UTC - event for external-local-nodes-rdhj6: {kubelet bootstrap-e2e-minion-group-327c} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Nov 26 08:55:29.099: INFO: At 2022-11-26 08:50:10 +0000 UTC - event for external-local-nodes-rdhj6: {kubelet bootstrap-e2e-minion-group-327c} BackOff: Back-off restarting failed container netexec in pod external-local-nodes-rdhj6_esipp-7299(bcd9213c-33e0-4e05-8b9b-bd7f7e05008c) Nov 26 08:55:29.099: INFO: At 2022-11-26 08:51:05 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-327c} BackOff: Back-off restarting failed container webserver in pod netserver-0_esipp-7299(2be6dbf8-ba97-494c-94a1-f7cdac421017) Nov 26 08:55:29.143: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 08:55:29.143: INFO: external-local-nodes-rdhj6 bootstrap-e2e-minion-group-327c Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:50:03 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:53:16 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:53:16 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:50:03 +0000 UTC }] Nov 26 08:55:29.143: INFO: netserver-0 bootstrap-e2e-minion-group-327c Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:49:15 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:54:36 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:54:36 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:49:15 +0000 UTC }] Nov 26 08:55:29.143: INFO: netserver-1 bootstrap-e2e-minion-group-lz41 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:49:15 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:55:17 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:55:17 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:49:15 +0000 UTC }] Nov 26 08:55:29.143: INFO: netserver-2 bootstrap-e2e-minion-group-s7dx Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:49:15 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:53:56 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:53:56 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:49:15 +0000 UTC }] Nov 26 08:55:29.143: INFO: test-container-pod bootstrap-e2e-minion-group-lz41 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:49:58 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:50:02 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:50:02 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:49:58 +0000 UTC }] Nov 26 08:55:29.143: INFO: Nov 26 08:55:29.456: INFO: Logging node info for node bootstrap-e2e-master Nov 26 08:55:29.498: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master ba8d154d-f7d1-4d02-b950-a084eb625244 13522 0 2022-11-26 08:28:16 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 
kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 08:54:40 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:54:40 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:54:40 +0000 UTC,LastTransitionTime:2022-11-26 
08:28:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:54:40 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:54:40 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.83.96.51,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bf41ff823483c389ad7dfd0c1ce16b06,SystemUUID:bf41ff82-3483-c389-ad7d-fd0c1ce16b06,BootID:5db3fc62-e7bb-4715-a04e-2bdf5328dbc8,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:55:29.498: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 08:55:29.542: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 08:55:29.604: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.604: INFO: Container kube-apiserver ready: true, 
restart count 4 Nov 26 08:55:29.604: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.604: INFO: Container kube-scheduler ready: false, restart count 5 Nov 26 08:55:29.604: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-26 08:27:49 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.604: INFO: Container kube-addon-manager ready: false, restart count 5 Nov 26 08:55:29.604: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-26 08:27:49 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.604: INFO: Container l7-lb-controller ready: false, restart count 7 Nov 26 08:55:29.604: INFO: metadata-proxy-v0.1-xx7th started at 2022-11-26 08:28:23 +0000 UTC (0+2 container statuses recorded) Nov 26 08:55:29.604: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 08:55:29.604: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 08:55:29.604: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.604: INFO: Container etcd-container ready: true, restart count 2 Nov 26 08:55:29.604: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.604: INFO: Container etcd-container ready: true, restart count 3 Nov 26 08:55:29.604: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.604: INFO: Container konnectivity-server-container ready: true, restart count 3 Nov 26 08:55:29.604: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.604: INFO: Container kube-controller-manager ready: false, restart count 6 Nov 26 08:55:29.781: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 08:55:29.781: INFO: Logging node info for node bootstrap-e2e-minion-group-327c Nov 26 08:55:29.823: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-327c 2792e2db-d60b-4a1a-b593-202ac7a81c7e 13611 0 2022-11-26 08:28:14 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-327c kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-327c topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-6566":"bootstrap-e2e-minion-group-327c","csi-hostpath-provisioning-1740":"bootstrap-e2e-minion-group-327c","csi-hostpath-provisioning-3311":"bootstrap-e2e-minion-group-327c","csi-hostpath-provisioning-360":"bootstrap-e2e-minion-group-327c","csi-mock-csi-mock-volumes-5232":"bootstrap-e2e-minion-group-327c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:52:26 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 08:54:46 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:55:15 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-327c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:54:46 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:54:46 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:54:46 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:54:46 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:54:46 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:54:46 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:54:46 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:24 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:54:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:54:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:54:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:54:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.233.8,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:d5273c9e67ce602a784d93fca00549e5,SystemUUID:d5273c9e-67ce-602a-784d-93fca00549e5,BootID:6b605594-03f1-4a39-9bc1-bb9fc688da43,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-360^a4cd5659-6d67-11ed-b066-d2033b87e55d,DevicePath:,},},Config:nil,},} Nov 26 08:55:29.824: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-327c Nov 26 08:55:29.867: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-327c Nov 26 08:55:29.967: INFO: hostexec-bootstrap-e2e-minion-group-327c-6xj2s started at 2022-11-26 08:39:23 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container agnhost-container ready: false, restart count 3 Nov 26 08:55:29.967: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:47:40 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:29.967: INFO: Container csi-attacher ready: true, restart count 5 Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: true, restart count 5 Nov 26 08:55:29.967: INFO: Container csi-resizer ready: true, restart count 5 Nov 26 08:55:29.967: INFO: Container csi-snapshotter ready: true, restart 
count 5 Nov 26 08:55:29.967: INFO: Container hostpath ready: true, restart count 5 Nov 26 08:55:29.967: INFO: Container liveness-probe ready: true, restart count 5 Nov 26 08:55:29.967: INFO: Container node-driver-registrar ready: true, restart count 5 Nov 26 08:55:29.967: INFO: csi-mockplugin-0 started at 2022-11-26 08:30:43 +0000 UTC (0+3 container statuses recorded) Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: false, restart count 8 Nov 26 08:55:29.967: INFO: Container driver-registrar ready: false, restart count 8 Nov 26 08:55:29.967: INFO: Container mock ready: false, restart count 8 Nov 26 08:55:29.967: INFO: csi-mockplugin-0 started at 2022-11-26 08:34:51 +0000 UTC (0+4 container statuses recorded) Nov 26 08:55:29.967: INFO: Container busybox ready: false, restart count 6 Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 08:55:29.967: INFO: Container driver-registrar ready: false, restart count 8 Nov 26 08:55:29.967: INFO: Container mock ready: false, restart count 8 Nov 26 08:55:29.967: INFO: csi-mockplugin-0 started at 2022-11-26 08:31:07 +0000 UTC (0+4 container statuses recorded) Nov 26 08:55:29.967: INFO: Container busybox ready: false, restart count 8 Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: false, restart count 7 Nov 26 08:55:29.967: INFO: Container driver-registrar ready: false, restart count 7 Nov 26 08:55:29.967: INFO: Container mock ready: false, restart count 7 Nov 26 08:55:29.967: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:48:02 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:29.967: INFO: Container csi-attacher ready: true, restart count 2 Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 08:55:29.967: INFO: Container csi-resizer ready: true, restart count 2 Nov 26 08:55:29.967: INFO: Container csi-snapshotter ready: true, restart count 2 Nov 26 08:55:29.967: INFO: Container hostpath ready: true, restart count 2 Nov 26 08:55:29.967: INFO: Container liveness-probe ready: true, restart count 2 Nov 26 08:55:29.967: INFO: Container node-driver-registrar ready: true, restart count 2 Nov 26 08:55:29.967: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 08:30:43 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container csi-attacher ready: true, restart count 7 Nov 26 08:55:29.967: INFO: csi-mockplugin-0 started at 2022-11-26 08:30:43 +0000 UTC (0+3 container statuses recorded) Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: false, restart count 8 Nov 26 08:55:29.967: INFO: Container driver-registrar ready: false, restart count 8 Nov 26 08:55:29.967: INFO: Container mock ready: false, restart count 8 Nov 26 08:55:29.967: INFO: csi-mockplugin-0 started at 2022-11-26 08:37:21 +0000 UTC (0+4 container statuses recorded) Nov 26 08:55:29.967: INFO: Container busybox ready: false, restart count 5 Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 08:55:29.967: INFO: Container driver-registrar ready: false, restart count 6 Nov 26 08:55:29.967: INFO: Container mock ready: false, restart count 6 Nov 26 08:55:29.967: INFO: hostexec-bootstrap-e2e-minion-group-327c-42nb6 started at 2022-11-26 08:52:00 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 08:55:29.967: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:52:05 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:29.967: INFO: Container 
csi-attacher ready: true, restart count 0 Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 08:55:29.967: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 08:55:29.967: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 08:55:29.967: INFO: Container hostpath ready: true, restart count 0 Nov 26 08:55:29.967: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 08:55:29.967: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 08:55:29.967: INFO: l7-default-backend-8549d69d99-b5jrs started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 08:55:29.967: INFO: konnectivity-agent-mmmgd started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container konnectivity-agent ready: false, restart count 8 Nov 26 08:55:29.967: INFO: metadata-proxy-v0.1-w74pw started at 2022-11-26 08:28:15 +0000 UTC (0+2 container statuses recorded) Nov 26 08:55:29.967: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 08:55:29.967: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 08:55:29.967: INFO: external-local-nodes-rdhj6 started at 2022-11-26 08:50:03 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container netexec ready: true, restart count 5 Nov 26 08:55:29.967: INFO: csi-mockplugin-0 started at 2022-11-26 08:48:49 +0000 UTC (0+3 container statuses recorded) Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 08:55:29.967: INFO: Container driver-registrar ready: true, restart count 4 Nov 26 08:55:29.967: INFO: Container mock ready: true, restart count 4 Nov 26 08:55:29.967: INFO: netserver-0 started at 2022-11-26 08:32:54 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container webserver ready: true, restart count 8 Nov 26 08:55:29.967: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:52:20 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:29.967: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 08:55:29.967: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 08:55:29.967: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 08:55:29.967: INFO: Container hostpath ready: true, restart count 3 Nov 26 08:55:29.967: INFO: Container liveness-probe ready: true, restart count 3 Nov 26 08:55:29.967: INFO: Container node-driver-registrar ready: true, restart count 3 Nov 26 08:55:29.967: INFO: netserver-0 started at 2022-11-26 08:49:15 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container webserver ready: false, restart count 4 Nov 26 08:55:29.967: INFO: kube-dns-autoscaler-5f6455f985-tnj96 started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container autoscaler ready: false, restart count 8 Nov 26 08:55:29.967: INFO: pvc-volume-tester-6957f started at 2022-11-26 08:33:17 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container volume-tester ready: false, restart count 0 Nov 26 08:55:29.967: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:37:11 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:29.967: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 08:55:29.967: INFO: 
Container csi-provisioner ready: false, restart count 5 Nov 26 08:55:29.967: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 08:55:29.967: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 08:55:29.967: INFO: Container hostpath ready: false, restart count 5 Nov 26 08:55:29.967: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 08:55:29.967: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 08:55:29.967: INFO: pod-43d27f70-d941-4117-b96d-c563fc43297f started at 2022-11-26 08:39:32 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container write-pod ready: false, restart count 0 Nov 26 08:55:29.967: INFO: kube-proxy-bootstrap-e2e-minion-group-327c started at 2022-11-26 08:28:14 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container kube-proxy ready: false, restart count 8 Nov 26 08:55:29.967: INFO: coredns-6d97d5ddb-cz84m started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container coredns ready: false, restart count 9 Nov 26 08:55:29.967: INFO: coredns-6d97d5ddb-q6tzt started at 2022-11-26 08:28:28 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container coredns ready: false, restart count 9 Nov 26 08:55:29.967: INFO: volume-snapshot-controller-0 started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container volume-snapshot-controller ready: false, restart count 9 Nov 26 08:55:29.967: INFO: pod-d4f0fe4d-227a-40e8-929b-a033a1faef35 started at 2022-11-26 08:39:35 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:29.967: INFO: Container write-pod ready: false, restart count 0 Nov 26 08:55:29.967: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:48:35 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:29.967: INFO: Container csi-attacher ready: false, restart count 4 Nov 26 08:55:29.967: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 08:55:29.967: INFO: Container csi-resizer ready: false, restart count 4 Nov 26 08:55:29.967: INFO: Container csi-snapshotter ready: false, restart count 4 Nov 26 08:55:29.967: INFO: Container hostpath ready: false, restart count 4 Nov 26 08:55:29.967: INFO: Container liveness-probe ready: false, restart count 4 Nov 26 08:55:29.967: INFO: Container node-driver-registrar ready: false, restart count 4 Nov 26 08:55:30.304: INFO: Latency metrics for node bootstrap-e2e-minion-group-327c Nov 26 08:55:30.304: INFO: Logging node info for node bootstrap-e2e-minion-group-lz41 Nov 26 08:55:30.349: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-lz41 2e433af5-3311-4285-97fa-cde6a9a5b261 13536 0 2022-11-26 08:28:20 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-lz41 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-lz41 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-8230":"bootstrap-e2e-minion-group-lz41","csi-hostpath-provisioning-9560":"bootstrap-e2e-minion-group-lz41"} node.alpha.kubernetes.io/ttl:0 
volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:20 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:48:07 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-26 08:53:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status} {node-problem-detector Update v1 2022-11-26 08:54:42 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-lz41,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:54:42 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:54:42 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:54:42 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:54:42 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:54:42 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:54:42 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:54:42 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:53:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:53:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:53:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:53:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.83.179.153,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:aa52222b7f092a36e5e364be7d47d224,SystemUUID:aa52222b-7f09-2a36-e5e3-64be7d47d224,BootID:74a831b5-c273-4958-8b03-2d43808117f5,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9 kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},},Config:nil,},} Nov 26 08:55:30.350: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-lz41 Nov 26 08:55:30.398: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-lz41 Nov 26 08:55:30.476: INFO: test-hostpath-type-vz47t started at 2022-11-26 08:51:40 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 08:55:30.476: INFO: back-off-cap started at 2022-11-26 08:33:28 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container back-off-cap ready: false, restart count 9 Nov 26 08:55:30.476: INFO: pod-back-off-image started at 2022-11-26 08:36:35 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container back-off ready: false, restart count 8 Nov 26 08:55:30.476: INFO: pod-configmaps-e34354ab-e827-4e6b-a435-5ad3a6953665 started at 2022-11-26 08:50:10 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 08:55:30.476: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:52:21 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:30.476: INFO: Container csi-attacher ready: false, restart count 4 Nov 26 08:55:30.476: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 08:55:30.476: INFO: Container csi-resizer ready: false, restart count 4 Nov 26 
08:55:30.476: INFO: Container csi-snapshotter ready: false, restart count 4 Nov 26 08:55:30.476: INFO: Container hostpath ready: false, restart count 4 Nov 26 08:55:30.476: INFO: Container liveness-probe ready: false, restart count 4 Nov 26 08:55:30.476: INFO: Container node-driver-registrar ready: false, restart count 4 Nov 26 08:55:30.476: INFO: metadata-proxy-v0.1-gtpkq started at 2022-11-26 08:28:22 +0000 UTC (0+2 container statuses recorded) Nov 26 08:55:30.476: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 08:55:30.476: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 08:55:30.476: INFO: metrics-server-v0.5.2-867b8754b9-q5chn started at 2022-11-26 08:28:48 +0000 UTC (0+2 container statuses recorded) Nov 26 08:55:30.476: INFO: Container metrics-server ready: false, restart count 9 Nov 26 08:55:30.476: INFO: Container metrics-server-nanny ready: false, restart count 9 Nov 26 08:55:30.476: INFO: kube-proxy-bootstrap-e2e-minion-group-lz41 started at 2022-11-26 08:28:20 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container kube-proxy ready: false, restart count 8 Nov 26 08:55:30.476: INFO: pod-secrets-532ad663-62f4-49e0-b24f-023151be8cd0 started at 2022-11-26 08:48:20 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 08:55:30.476: INFO: mutability-test-h9msf started at 2022-11-26 08:46:47 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container netexec ready: true, restart count 4 Nov 26 08:55:30.476: INFO: pod-6d904d9d-47d9-4612-b9ba-75195733a8e3 started at 2022-11-26 08:32:05 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container write-pod ready: false, restart count 0 Nov 26 08:55:30.476: INFO: pod-secrets-237c2934-d670-464d-9497-3fea99e7afae started at 2022-11-26 08:35:10 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 08:55:30.476: INFO: netserver-1 started at 2022-11-26 08:49:15 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.476: INFO: Container webserver ready: false, restart count 2 Nov 26 08:55:30.476: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:47:14 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:30.477: INFO: Container csi-attacher ready: true, restart count 2 Nov 26 08:55:30.477: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 08:55:30.477: INFO: Container csi-resizer ready: true, restart count 2 Nov 26 08:55:30.477: INFO: Container csi-snapshotter ready: true, restart count 2 Nov 26 08:55:30.477: INFO: Container hostpath ready: true, restart count 2 Nov 26 08:55:30.477: INFO: Container liveness-probe ready: true, restart count 2 Nov 26 08:55:30.477: INFO: Container node-driver-registrar ready: true, restart count 2 Nov 26 08:55:30.477: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:47:44 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:30.477: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 08:55:30.477: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 08:55:30.477: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 08:55:30.477: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 08:55:30.477: INFO: Container hostpath ready: true, restart count 3 Nov 26 08:55:30.477: INFO: Container liveness-probe ready: true, restart count 3 Nov 26 08:55:30.477: 
INFO: Container node-driver-registrar ready: true, restart count 3 Nov 26 08:55:30.477: INFO: konnectivity-agent-8v4r5 started at 2022-11-26 08:28:34 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.477: INFO: Container konnectivity-agent ready: true, restart count 8 Nov 26 08:55:30.477: INFO: netserver-1 started at 2022-11-26 08:32:54 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.477: INFO: Container webserver ready: false, restart count 8 Nov 26 08:55:30.477: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:31:57 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:30.477: INFO: Container csi-attacher ready: false, restart count 7 Nov 26 08:55:30.477: INFO: Container csi-provisioner ready: false, restart count 7 Nov 26 08:55:30.477: INFO: Container csi-resizer ready: false, restart count 7 Nov 26 08:55:30.477: INFO: Container csi-snapshotter ready: false, restart count 7 Nov 26 08:55:30.477: INFO: Container hostpath ready: false, restart count 7 Nov 26 08:55:30.477: INFO: Container liveness-probe ready: false, restart count 7 Nov 26 08:55:30.477: INFO: Container node-driver-registrar ready: false, restart count 7 Nov 26 08:55:30.477: INFO: external-local-pods-7f2ql started at 2022-11-26 08:51:57 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.477: INFO: Container netexec ready: false, restart count 3 Nov 26 08:55:30.477: INFO: mutability-test-njz4b started at 2022-11-26 08:46:47 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.477: INFO: Container netexec ready: true, restart count 2 Nov 26 08:55:30.477: INFO: test-container-pod started at 2022-11-26 08:49:58 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.477: INFO: Container webserver ready: true, restart count 1 Nov 26 08:55:30.715: INFO: Latency metrics for node bootstrap-e2e-minion-group-lz41 Nov 26 08:55:30.715: INFO: Logging node info for node bootstrap-e2e-minion-group-s7dx Nov 26 08:55:30.757: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-s7dx 94f7d8ed-1dd6-4a3f-9454-b3d54cd8c750 13546 0 2022-11-26 08:28:22 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-s7dx kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-s7dx topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-5915":"bootstrap-e2e-minion-group-s7dx","csi-hostpath-provisioning-5957":"bootstrap-e2e-minion-group-s7dx","csi-hostpath-volumemode-3424":"bootstrap-e2e-minion-group-s7dx","csi-mock-csi-mock-volumes-3757":"csi-mock-csi-mock-volumes-3757","csi-mock-csi-mock-volumes-9990":"bootstrap-e2e-minion-group-s7dx"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:23 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:52:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 08:54:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status} {node-problem-detector Update v1 2022-11-26 08:54:45 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-s7dx,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 
DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:54:45 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:54:45 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:54:45 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:54:45 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:54:45 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:54:45 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:54:45 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:52:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:52:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:52:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:52:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.127.51.136,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7318864e323f36191a9cc6aee4e5582d,SystemUUID:7318864e-323f-3619-1a9c-c6aee4e5582d,BootID:247dfbca-e301-45ca-b5e7-bc2da79a6926,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:55:30.758: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-s7dx Nov 26 08:55:30.806: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-s7dx Nov 26 08:55:30.884: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:52:00 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:30.884: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 08:55:30.884: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 08:55:30.884: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 08:55:30.884: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 08:55:30.884: INFO: Container hostpath ready: true, restart count 0 Nov 26 08:55:30.884: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 08:55:30.884: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 08:55:30.884: INFO: pod-6fb046f8-5358-4deb-960a-bec3c718a5d1 started at 2022-11-26 08:47:33 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container write-pod ready: false, restart count 0 Nov 26 08:55:30.884: INFO: kube-proxy-bootstrap-e2e-minion-group-s7dx started at 2022-11-26 08:28:22 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container kube-proxy ready: false, restart count 8 Nov 26 08:55:30.884: INFO: konnectivity-agent-m6kcz started at 2022-11-26 08:28:34 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container konnectivity-agent ready: false, restart count 7 Nov 26 08:55:30.884: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-8q696 started at 2022-11-26 08:39:23 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container agnhost-container ready: false, restart count 6 Nov 26 08:55:30.884: INFO: netserver-2 started at 2022-11-26 08:49:15 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container webserver ready: false, restart count 5 Nov 26 08:55:30.884: INFO: netserver-2 started at 2022-11-26 08:32:54 +0000 UTC (0+1 container statuses 
recorded) Nov 26 08:55:30.884: INFO: Container webserver ready: true, restart count 7 Nov 26 08:55:30.884: INFO: pod-subpath-test-inlinevolume-4dbw started at 2022-11-26 08:39:23 +0000 UTC (1+2 container statuses recorded) Nov 26 08:55:30.884: INFO: Init container init-volume-inlinevolume-4dbw ready: true, restart count 0 Nov 26 08:55:30.884: INFO: Container test-container-subpath-inlinevolume-4dbw ready: false, restart count 7 Nov 26 08:55:30.884: INFO: Container test-container-volume-inlinevolume-4dbw ready: false, restart count 6 Nov 26 08:55:30.884: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:51:35 +0000 UTC (0+7 container statuses recorded) Nov 26 08:55:30.884: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 08:55:30.884: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 08:55:30.884: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 08:55:30.884: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 08:55:30.884: INFO: Container hostpath ready: true, restart count 1 Nov 26 08:55:30.884: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 08:55:30.884: INFO: Container node-driver-registrar ready: true, restart count 2 Nov 26 08:55:30.884: INFO: pause-pod-deployment-7c665f9d5d-vbtlb started at 2022-11-26 08:52:00 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container agnhost-pause ready: false, restart count 2 Nov 26 08:55:30.884: INFO: csi-mockplugin-0 started at 2022-11-26 08:49:20 +0000 UTC (0+4 container statuses recorded) Nov 26 08:55:30.884: INFO: Container busybox ready: true, restart count 1 Nov 26 08:55:30.884: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 08:55:30.884: INFO: Container driver-registrar ready: true, restart count 1 Nov 26 08:55:30.884: INFO: Container mock ready: true, restart count 1 Nov 26 08:55:30.884: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-gnxt2 started at 2022-11-26 08:52:10 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 08:55:30.884: INFO: metadata-proxy-v0.1-m5q9x started at 2022-11-26 08:28:23 +0000 UTC (0+2 container statuses recorded) Nov 26 08:55:30.884: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 08:55:30.884: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 08:55:30.884: INFO: external-local-lb-kvgl4 started at 2022-11-26 08:51:20 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container netexec ready: true, restart count 2 Nov 26 08:55:30.884: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-9j64k started at 2022-11-26 08:51:39 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 08:55:30.884: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 08:48:01 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 08:55:30.884: INFO: csi-mockplugin-0 started at 2022-11-26 08:31:19 +0000 UTC (0+4 container statuses recorded) Nov 26 08:55:30.884: INFO: Container busybox ready: false, restart count 7 Nov 26 08:55:30.884: INFO: Container csi-provisioner ready: false, restart count 8 Nov 26 08:55:30.884: INFO: Container driver-registrar ready: false, restart count 8 Nov 26 08:55:30.884: INFO: Container mock ready: false, restart count 8 Nov 26 08:55:30.884: INFO: 
hostexec-bootstrap-e2e-minion-group-s7dx-fjbz2 started at 2022-11-26 08:39:23 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container agnhost-container ready: false, restart count 4 Nov 26 08:55:30.884: INFO: csi-mockplugin-0 started at 2022-11-26 08:48:01 +0000 UTC (0+3 container statuses recorded) Nov 26 08:55:30.884: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 08:55:30.884: INFO: Container driver-registrar ready: true, restart count 2 Nov 26 08:55:30.884: INFO: Container mock ready: true, restart count 2 Nov 26 08:55:30.884: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-bb2lb started at 2022-11-26 08:38:41 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container agnhost-container ready: true, restart count 6 Nov 26 08:55:30.884: INFO: pod-645ca7c3-0782-49c3-9f8c-fe2716c177bc started at 2022-11-26 08:39:40 +0000 UTC (0+1 container statuses recorded) Nov 26 08:55:30.884: INFO: Container write-pod ready: false, restart count 0 Nov 26 08:55:31.201: INFO: Latency metrics for node bootstrap-e2e-minion-group-s7dx [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "esipp-7299" for this suite. 11/26/22 08:55:31.201
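The per-node dump above ("Container ... ready: ..., restart count ...") comes from the e2e framework's debug helpers, which list every pod bound to the node and report container readiness and restart counts. As a rough illustration of the same idea (this is a minimal client-go sketch, not the framework's actual code; the node name and kubeconfig flag defaults are placeholders taken from this log):

```go
package main

import (
	"context"
	"flag"
	"fmt"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	kubeconfig := flag.String("kubeconfig", filepath.Join(homedir.HomeDir(), ".kube", "config"), "path to kubeconfig")
	node := flag.String("node", "bootstrap-e2e-minion-group-s7dx", "node whose pods to dump")
	flag.Parse()

	cfg, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// A field selector on spec.nodeName returns only the pods bound to this
	// node -- roughly what "pods the kubelet thinks is on node" reports.
	pods, err := client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
		FieldSelector: "spec.nodeName=" + *node,
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Printf("%s/%s started at %s\n", p.Namespace, p.Name, p.CreationTimestamp)
		for _, cs := range p.Status.ContainerStatuses {
			fmt.Printf("  Container %s ready: %v, restart count %d\n", cs.Name, cs.Ready, cs.RestartCount)
		}
	}
}
```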
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\swork\sfor\stype\=LoadBalancer$'
test/e2e/network/loadbalancer.go:1305 k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1305 +0x288 There were additional failures detected after the initial failure: [FAILED] Nov 26 08:58:36.860: failed to list events in namespace "esipp-851": Get "https://34.83.96.51/api/v1/namespaces/esipp-851/events": dial tcp 34.83.96.51:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 08:58:36.900: Couldn't delete ns: "esipp-851": Delete "https://34.83.96.51/api/v1/namespaces/esipp-851": dial tcp 34.83.96.51:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.83.96.51/api/v1/namespaces/esipp-851", Err:(*net.OpError)(0xc0028594a0)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370 from junit_01.xml
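The initial failure at loadbalancer.go:1305 is GetHTTPContent giving up after polling the load balancer VIP's /clientip endpoint for five minutes; the detailed log below shows each attempt ending in either "connection refused" or a ~10s client timeout, with a retry roughly every two seconds. The pattern being exercised is poll-until-HTTP-success with a bounded per-request timeout. A minimal standalone sketch of that pattern (the URL and intervals are copied from the log for illustration; this is not the framework's GetHTTPContent/PokeHTTP code):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// pollHTTP pokes url until it answers 200, retrying every interval with a
// per-request timeout and giving up after the overall timeout -- the same
// shape as the GetHTTPContent/PokeHTTP loop in the traces below (from the
// log: ~2s between pokes, 10s per request, 5m overall).
func pollHTTP(url string, interval, perRequest, timeout time.Duration) (string, error) {
	client := &http.Client{Timeout: perRequest}
	deadline := time.Now().Add(timeout)
	for {
		resp, err := client.Get(url)
		if err == nil {
			body, readErr := io.ReadAll(resp.Body)
			resp.Body.Close()
			if readErr == nil && resp.StatusCode == http.StatusOK {
				return string(body), nil
			}
		}
		if time.Now().After(deadline) {
			return "", fmt.Errorf("could not reach %s after %v: timed out waiting for the condition", url, timeout)
		}
		time.Sleep(interval)
	}
}

func main() {
	// VIP and path taken from the log below; any unreachable endpoint
	// reproduces the "timed out waiting for the condition" failure mode.
	body, err := pollHTTP("http://34.105.123.119:80/clientip", 2*time.Second, 10*time.Second, 5*time.Minute)
	if err != nil {
		fmt.Println("FAIL:", err)
		return
	}
	fmt.Println("clientip:", body)
}
```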
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 08:50:03.257 Nov 26 08:50:03.257: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 08:50:03.259 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 08:50:03.519 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 08:50:03.641 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1250 [It] should work for type=LoadBalancer test/e2e/network/loadbalancer.go:1266 STEP: creating a service esipp-851/external-local-lb with type=LoadBalancer 11/26/22 08:50:03.932 STEP: setting ExternalTrafficPolicy=Local 11/26/22 08:50:03.932 STEP: waiting for loadbalancer for service esipp-851/external-local-lb 11/26/22 08:50:04.082 Nov 26 08:50:04.082: INFO: Waiting up to 15m0s for service "external-local-lb" to have a LoadBalancer STEP: creating a pod to be part of the service external-local-lb 11/26/22 08:51:20.232 Nov 26 08:51:20.305: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 08:51:20.367: INFO: Found all 1 pods Nov 26 08:51:20.367: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [external-local-lb-kvgl4] Nov 26 08:51:20.367: INFO: Waiting up to 2m0s for pod "external-local-lb-kvgl4" in namespace "esipp-851" to be "running and ready" Nov 26 08:51:20.425: INFO: Pod "external-local-lb-kvgl4": Phase="Pending", Reason="", readiness=false. Elapsed: 58.075267ms Nov 26 08:51:20.425: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-kvgl4' on 'bootstrap-e2e-minion-group-s7dx' to be 'Running' but was 'Pending' Nov 26 08:51:22.675: INFO: Pod "external-local-lb-kvgl4": Phase="Running", Reason="", readiness=true. Elapsed: 2.307933474s Nov 26 08:51:22.675: INFO: Pod "external-local-lb-kvgl4" satisfied condition "running and ready" Nov 26 08:51:22.675: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [external-local-lb-kvgl4] STEP: waiting for loadbalancer for service esipp-851/external-local-lb 11/26/22 08:51:22.675 Nov 26 08:51:22.675: INFO: Waiting up to 15m0s for service "external-local-lb" to have a LoadBalancer STEP: reading clientIP using the TCP service's service port via its external VIP 11/26/22 08:51:22.782 Nov 26 08:51:22.782: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:51:23.854: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:51:25.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:51:25.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:51:27.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:51:27.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:51:29.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:51:29.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:51:31.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:51:41.856: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:51:43.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:51:43.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:51:45.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:51:45.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:51:47.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:51:47.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:51:49.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:51:59.856: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:52:01.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:01.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:03.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:03.893: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:05.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:05.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:07.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:07.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:09.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:09.894: INFO: 
Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:11.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:11.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:13.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:13.893: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:15.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:15.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:17.857: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:27.858: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:52:29.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:29.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:31.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:31.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:33.862: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:33.901: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:35.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:45.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:52:47.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:47.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:52:49.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:59.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:52:59.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:52:59.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:53:01.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:53:01.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:53:03.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:53:13.854: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:53:13.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:53:23.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) 
Nov 26 08:53:25.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:53:35.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:53:37.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:53:37.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:53:39.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:53:49.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:53:51.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:53:51.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:53:53.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:54:03.856: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:54:05.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:54:05.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:54:07.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:54:07.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:54:09.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:54:19.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:54:21.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:54:21.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:54:23.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:54:33.856: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:54:35.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:54:45.856: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:54:47.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:54:47.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:54:49.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:54:49.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:54:51.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:01.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:55:03.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:03.895: INFO: 
Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused ------------------------------ Progress Report for Ginkgo Process #12 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 5m0.676s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 5m0.001s) test/e2e/network/loadbalancer.go:1266 At [By Step] reading clientIP using the TCP service's service port via its external VIP (Step Runtime: 3m41.151s) test/e2e/network/loadbalancer.go:1303 Spec Goroutine goroutine 2660 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc001034768, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x30?, 0x2fd9d05?, 0x38?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x0?, 0xc003e0bd80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x6bdb8c0?, 0x7f8f6c8?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.GetHTTPContent({0xc005288ff0, 0xe}, 0x50, 0x0?, {0x75c4cc0, 0x9}) test/e2e/network/util.go:44 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1304 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 08:55:05.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:05.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:55:07.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:17.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:55:19.854: INFO: Poking "http://34.105.123.119:80/clientip" ------------------------------ Progress Report for Ginkgo Process #12 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 5m20.677s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 5m20.002s) test/e2e/network/loadbalancer.go:1266 At [By Step] reading clientIP using the TCP service's service port via its external VIP (Step Runtime: 4m1.153s) test/e2e/network/loadbalancer.go:1303 Spec Goroutine goroutine 2660 [select] net/http.(*Transport).getConn(0xc002074000, 0xc0018fa0c0, {{}, 0x0, {0xc004c06180, 0x4}, {0xc004146480, 0x11}, 0x0}) /usr/local/go/src/net/http/transport.go:1376 net/http.(*Transport).roundTrip(0xc002074000, 0xc000de4e00) /usr/local/go/src/net/http/transport.go:582 net/http.(*Transport).RoundTrip(0xc000de4e00?, 0x7fadc80?) 
/usr/local/go/src/net/http/roundtrip.go:17 net/http.send(0xc000de4700, {0x7fadc80, 0xc002074000}, {0x74d54e0?, 0x26b3a01?, 0xae40400?}) /usr/local/go/src/net/http/client.go:251 net/http.(*Client).send(0xc001c8c1b0, 0xc000de4700, {0x0?, 0xc003e0b618?, 0xae40400?}) /usr/local/go/src/net/http/client.go:175 net/http.(*Client).do(0xc001c8c1b0, 0xc000de4700) /usr/local/go/src/net/http/client.go:715 net/http.(*Client).Do(...) /usr/local/go/src/net/http/client.go:581 net/http.(*Client).Get(0x2?, {0xc004c06180?, 0x9?}) /usr/local/go/src/net/http/client.go:479 k8s.io/kubernetes/test/e2e/framework/network.httpGetNoConnectionPoolTimeout({0xc004c06180, 0x21}, 0x2540be400) test/e2e/framework/network/utils.go:1065 k8s.io/kubernetes/test/e2e/framework/network.PokeHTTP({0xc005288ff0, 0xe}, 0x50, {0x75c4cc0, 0x9}, 0x0?) test/e2e/framework/network/utils.go:998 > k8s.io/kubernetes/test/e2e/network.GetHTTPContent.func1() test/e2e/network/util.go:45 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc000136000?}, 0x7fadf60?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc001034768, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x30?, 0x2fd9d05?, 0x38?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x0?, 0xc003e0bd80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x6bdb8c0?, 0x7f8f6c8?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.GetHTTPContent({0xc005288ff0, 0xe}, 0x50, 0x0?, {0x75c4cc0, 0x9}) test/e2e/network/util.go:44 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1304 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 08:55:29.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:55:31.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:31.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:55:33.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:43.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:55:43.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:43.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused ------------------------------ Progress Report for Ginkgo Process #12 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 5m40.679s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 5m40.004s) test/e2e/network/loadbalancer.go:1266 At [By Step] reading clientIP using the TCP service's service port via its external VIP (Step Runtime: 4m21.154s) test/e2e/network/loadbalancer.go:1303 Spec Goroutine goroutine 2660 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc001034768, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x30?, 0x2fd9d05?, 0x38?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x0?, 0xc003e0bd80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x6bdb8c0?, 0x7f8f6c8?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.GetHTTPContent({0xc005288ff0, 0xe}, 0x50, 0x0?, {0x75c4cc0, 0x9}) test/e2e/network/util.go:44 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1304 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 08:55:45.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:55.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:55:57.854: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:57.895: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:55:59.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:55:59.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:56:01.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:56:01.894: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": dial tcp 34.105.123.119:80: connect: connection refused Nov 26 08:56:03.855: INFO: Poking "http://34.105.123.119:80/clientip" ------------------------------ Progress Report for Ginkgo Process #12 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 6m0.681s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 6m0.006s) test/e2e/network/loadbalancer.go:1266 At [By Step] reading clientIP using the TCP service's service port via its external VIP (Step Runtime: 4m41.156s) test/e2e/network/loadbalancer.go:1303 Spec Goroutine goroutine 2660 [select] net/http.(*Transport).getConn(0xc0008b6a00, 0xc00164ec80, {{}, 0x0, {0xc00265ab10, 0x4}, {0xc0010348e8, 0x11}, 0x0}) /usr/local/go/src/net/http/transport.go:1376 net/http.(*Transport).roundTrip(0xc0008b6a00, 0xc001f03200) /usr/local/go/src/net/http/transport.go:582 net/http.(*Transport).RoundTrip(0xc001f03200?, 0x7fadc80?) /usr/local/go/src/net/http/roundtrip.go:17 net/http.send(0xc001f03100, {0x7fadc80, 0xc0008b6a00}, {0x74d54e0?, 0x26b3a01?, 0xae40400?}) /usr/local/go/src/net/http/client.go:251 net/http.(*Client).send(0xc001e63290, 0xc001f03100, {0x0?, 0x262a61f?, 0xae40400?}) /usr/local/go/src/net/http/client.go:175 net/http.(*Client).do(0xc001e63290, 0xc001f03100) /usr/local/go/src/net/http/client.go:715 net/http.(*Client).Do(...) /usr/local/go/src/net/http/client.go:581 net/http.(*Client).Get(0x2?, {0xc00265ab10?, 0x9?}) /usr/local/go/src/net/http/client.go:479 k8s.io/kubernetes/test/e2e/framework/network.httpGetNoConnectionPoolTimeout({0xc00265ab10, 0x21}, 0x2540be400) test/e2e/framework/network/utils.go:1065 k8s.io/kubernetes/test/e2e/framework/network.PokeHTTP({0xc005288ff0, 0xe}, 0x50, {0x75c4cc0, 0x9}, 0x0?) 
test/e2e/framework/network/utils.go:998 > k8s.io/kubernetes/test/e2e/network.GetHTTPContent.func1() test/e2e/network/util.go:45 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc000136000?}, 0x7fadf60?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc001034768, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x30?, 0x2fd9d05?, 0x38?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x0?, 0xc003e0bd80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x6bdb8c0?, 0x7f8f6c8?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.GetHTTPContent({0xc005288ff0, 0xe}, 0x50, 0x0?, {0x75c4cc0, 0x9}) test/e2e/network/util.go:44 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1304 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 08:56:13.856: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:56:15.854: INFO: Poking "http://34.105.123.119:80/clientip" ------------------------------ Progress Report for Ginkgo Process #12 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 6m20.684s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 6m20.009s) test/e2e/network/loadbalancer.go:1266 At [By Step] reading clientIP using the TCP service's service port via its external VIP (Step Runtime: 5m1.159s) test/e2e/network/loadbalancer.go:1303 Spec Goroutine goroutine 2660 [select] net/http.(*Transport).getConn(0xc000040780, 0xc001f62080, {{}, 0x0, {0xc0042e1380, 0x4}, {0xc0002159c8, 0x11}, 0x0}) /usr/local/go/src/net/http/transport.go:1376 net/http.(*Transport).roundTrip(0xc000040780, 0xc000216200) /usr/local/go/src/net/http/transport.go:582 net/http.(*Transport).RoundTrip(0xc000216200?, 0x7fadc80?) /usr/local/go/src/net/http/roundtrip.go:17 net/http.send(0xc000641f00, {0x7fadc80, 0xc000040780}, {0x74d54e0?, 0x26b3a01?, 0xae40400?}) /usr/local/go/src/net/http/client.go:251 net/http.(*Client).send(0xc00414e2a0, 0xc000641f00, {0x0?, 0xc003e0b618?, 0xae40400?}) /usr/local/go/src/net/http/client.go:175 net/http.(*Client).do(0xc00414e2a0, 0xc000641f00) /usr/local/go/src/net/http/client.go:715 net/http.(*Client).Do(...) 
/usr/local/go/src/net/http/client.go:581 net/http.(*Client).Get(0x2?, {0xc0042e1380?, 0x9?}) /usr/local/go/src/net/http/client.go:479 k8s.io/kubernetes/test/e2e/framework/network.httpGetNoConnectionPoolTimeout({0xc0042e1380, 0x21}, 0x2540be400) test/e2e/framework/network/utils.go:1065 k8s.io/kubernetes/test/e2e/framework/network.PokeHTTP({0xc005288ff0, 0xe}, 0x50, {0x75c4cc0, 0x9}, 0x0?) test/e2e/framework/network/utils.go:998 > k8s.io/kubernetes/test/e2e/network.GetHTTPContent.func1() test/e2e/network/util.go:45 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc000136000?}, 0x7fadf60?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc001034768, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x30?, 0x2fd9d05?, 0x38?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x0?, 0xc003e0bd80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x6bdb8c0?, 0x7f8f6c8?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.GetHTTPContent({0xc005288ff0, 0xe}, 0x50, 0x0?, {0x75c4cc0, 0x9}) test/e2e/network/util.go:44 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1304 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 08:56:25.855: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:56:25.855: INFO: Poking "http://34.105.123.119:80/clientip" Nov 26 08:56:35.856: INFO: Poke("http://34.105.123.119:80/clientip"): Get "http://34.105.123.119:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 08:56:35.856: INFO: Could not reach HTTP service through 34.105.123.119:80/clientip after 5m0s: timed out waiting for the condition Nov 26 08:56:35.856: INFO: Unexpected error: <*errors.errorString | 0xc000205d60>: { s: "timed out waiting for the condition", } Nov 26 08:56:35.856: FAIL: timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1305 +0x288 Nov 26 08:56:36.179: INFO: Waiting up to 15m0s for service "external-local-lb" to have no LoadBalancer ------------------------------ Progress Report for Ginkgo Process #12 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 6m40.686s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 6m40.011s) 
test/e2e/network/loadbalancer.go:1266 At [By Step] reading clientIP using the TCP service's service port via its external VIP (Step Runtime: 5m21.161s) test/e2e/network/loadbalancer.go:1303 Spec Goroutine goroutine 2660 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc004147530, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0xa8?, 0x2fd9d05?, 0x48?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc000136000}, 0xc003e0b708?, 0xc003e0b6f8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x7ffe0bf3e4fb?, 0xa?, 0x7fe0bc8?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 k8s.io/kubernetes/test/e2e/framework/providers/gce.(*Provider).EnsureLoadBalancerResourcesDeleted(0xc000e20480, {0xc00224c300, 0xe}, {0x77c6ae2, 0x2}) test/e2e/framework/providers/gce/gce.go:195 k8s.io/kubernetes/test/e2e/framework.EnsureLoadBalancerResourcesDeleted(...) test/e2e/framework/util.go:551 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancerDestroy.func1() test/e2e/framework/service/jig.go:602 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancerDestroy(0xc0050f38b0, {0xc00224c300?, 0x100c00417b6b0?}, 0x7fdc1c3bb250?, 0x0?) test/e2e/framework/service/jig.go:614 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).ChangeServiceType(0x0?, {0x75c5095?, 0x0?}, 0x0?) test/e2e/framework/service/jig.go:186 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1278 panic({0x70eb7e0, 0xc0000f8620}) /usr/local/go/src/runtime/panic.go:884 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2.Fail({0xc002a74080, 0x38}, {0xc003e0bd58?, 0x75b521a?, 0xc003e0bd78?}) vendor/github.com/onsi/ginkgo/v2/core_dsl.go:352 k8s.io/kubernetes/test/e2e/framework.Fail({0x76740e9, 0x23}, {0xc003e0bdf0?, 0x76740e9?, 0xc003e0be18?}) test/e2e/framework/log.go:61 k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fa3ee0, 0xc000205d60}, {0x0?, 0x75c4cc0?, 0x9?}) test/e2e/framework/expect.go:76 k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...) 
test/e2e/framework/expect.go:43 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1305 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #12 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 7m0.687s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 7m0.013s) test/e2e/network/loadbalancer.go:1266 At [By Step] reading clientIP using the TCP service's service port via its external VIP (Step Runtime: 5m41.163s) test/e2e/network/loadbalancer.go:1303 Spec Goroutine goroutine 2660 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc001034468, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x48?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc000136000}, 0x2663d71?, 0xc0004f6e98?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x7fbc960?, 0xc005220540?, 0x7fb8220?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForAvailableEndpoint(0xc0050f38b0, 0x0?) test/e2e/framework/service/jig.go:444 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).GetEndpointNodeNames(0xc0050f38b0) test/e2e/framework/service/jig.go:308 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).ListNodesWithEndpoint(0xc0050f38b0) test/e2e/framework/service/jig.go:287 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).GetEndpointNodesWithIP(0xc004ca8188?, {0x75c8fac, 0xa}) test/e2e/framework/service/jig.go:273 > k8s.io/kubernetes/test/e2e/network.getEndpointNodesWithInternalIP(0x0?) test/e2e/network/service.go:746 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1283 panic({0x70eb7e0, 0xc0000f8620}) /usr/local/go/src/runtime/panic.go:884 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2.Fail({0xc002a74080, 0x38}, {0xc003e0bd58?, 0x75b521a?, 0xc003e0bd78?}) vendor/github.com/onsi/ginkgo/v2/core_dsl.go:352 k8s.io/kubernetes/test/e2e/framework.Fail({0x76740e9, 0x23}, {0xc003e0bdf0?, 0x76740e9?, 0xc003e0be18?}) test/e2e/framework/log.go:61 k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fa3ee0, 0xc000205d60}, {0x0?, 0x75c4cc0?, 0x9?}) test/e2e/framework/expect.go:76 k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...) 
test/e2e/framework/expect.go:43 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1305 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ STEP: Performing setup for networking test in namespace esipp-851 11/26/22 08:57:15.535 STEP: creating a selector 11/26/22 08:57:15.535 STEP: Creating the service pods in kubernetes 11/26/22 08:57:15.535 Nov 26 08:57:15.535: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable Nov 26 08:57:15.909: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "esipp-851" to be "running and ready" Nov 26 08:57:16.010: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 100.108061ms Nov 26 08:57:16.010: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 26 08:57:18.088: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.178253859s Nov 26 08:57:18.088: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:57:20.084: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.174850179s Nov 26 08:57:20.084: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:57:22.066: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.15686937s Nov 26 08:57:22.066: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #12 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 7m20.69s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 7m20.015s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 8.411s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 2660 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc004147e90, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0xb8?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x75b521a?, 0xc0004f7208?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc001b7eb60}, {0xc000df1780, 0x9}, {0xc001279c63, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc001b7eb60?}, {0xc001279c63?, 0xc002976820?}, {0xc000df1780?, 0xc0004f7450?}, 0x271e5fe?) 
test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000b7eb60, {0x75c6f7c, 0x9}, 0xc001a7e1e0) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000b7eb60, 0x7fdc1c3b46c0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000b7eb60, 0x3b?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc0012e2000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 panic({0x70eb7e0, 0xc0000f8620}) /usr/local/go/src/runtime/panic.go:884 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2.Fail({0xc002a74080, 0x38}, {0xc003e0bd58?, 0x75b521a?, 0xc003e0bd78?}) vendor/github.com/onsi/ginkgo/v2/core_dsl.go:352 k8s.io/kubernetes/test/e2e/framework.Fail({0x76740e9, 0x23}, {0xc003e0bdf0?, 0x76740e9?, 0xc003e0be18?}) test/e2e/framework/log.go:61 k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fa3ee0, 0xc000205d60}, {0x0?, 0x75c4cc0?, 0x9?}) test/e2e/framework/expect.go:76 k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...) test/e2e/framework/expect.go:43 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1305 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 08:57:24.088: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.17878545s Nov 26 08:57:24.088: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:57:26.077: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.16777832s Nov 26 08:57:26.077: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:57:28.065: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.155818002s Nov 26 08:57:28.065: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:57:30.144: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.234889143s Nov 26 08:57:30.144: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:57:32.088: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.178878921s Nov 26 08:57:32.088: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:57:34.073: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.163428154s Nov 26 08:57:34.073: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:57:36.071: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.161582222s Nov 26 08:57:36.071: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 08:57:38.060: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. 
Elapsed: 22.150735002s Nov 26 08:57:38.060: INFO: The phase of Pod netserver-0 is Running (Ready = true) Nov 26 08:57:38.060: INFO: Pod "netserver-0" satisfied condition "running and ready" Nov 26 08:57:38.113: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "esipp-851" to be "running and ready" Nov 26 08:57:38.169: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 56.375293ms Nov 26 08:57:38.169: INFO: The phase of Pod netserver-1 is Running (Ready = true) Nov 26 08:57:38.169: INFO: Pod "netserver-1" satisfied condition "running and ready" Nov 26 08:57:38.223: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "esipp-851" to be "running and ready" Nov 26 08:57:38.272: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 49.710837ms Nov 26 08:57:38.272: INFO: The phase of Pod netserver-2 is Running (Ready = true) Nov 26 08:57:38.272: INFO: Pod "netserver-2" satisfied condition "running and ready" STEP: Creating test pods 11/26/22 08:57:38.326 Nov 26 08:57:38.387: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "esipp-851" to be "running" Nov 26 08:57:38.439: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 52.780513ms Nov 26 08:57:40.500: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.113284831s Nov 26 08:57:40.500: INFO: Pod "test-container-pod" satisfied condition "running" Nov 26 08:57:40.550: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 STEP: Getting node addresses 11/26/22 08:57:40.55 Nov 26 08:57:40.550: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable STEP: Creating the service on top of the pods in kubernetes 11/26/22 08:57:40.682 Nov 26 08:57:40.879: INFO: Service node-port-service in namespace esipp-851 found. Nov 26 08:57:41.078: INFO: Service session-affinity-service in namespace esipp-851 found. 
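With the services created, the test next blocks until each service's Endpoints object reports three ready addresses (see the "Waiting for amount of service:node-port-service endpoints to be 3" lines that follow) before probing the health-check NodePort. A rough client-go sketch of that wait, using the same wait.PollImmediate helper the goroutine dumps show the framework itself calling (the helper below is illustrative, not the framework's jig code; namespace, service name, and count are copied from the log):

```go
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForEndpointCount polls until the named Endpoints object lists the
// expected number of ready addresses, mirroring the "Waiting for amount of
// service:node-port-service endpoints to be 3" step below.
func waitForEndpointCount(c kubernetes.Interface, ns, name string, want int, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		ep, err := c.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, nil // keep retrying on transient API errors
		}
		got := 0
		for _, subset := range ep.Subsets {
			got += len(subset.Addresses)
		}
		return got == want, nil
	})
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	if err := waitForEndpointCount(client, "esipp-851", "node-port-service", 3, 2*time.Minute); err != nil {
		panic(err)
	}
	fmt.Println("service:node-port-service has 3 endpoints")
}
```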
STEP: Waiting for NodePort service to expose endpoint 11/26/22 08:57:41.128
Nov 26 08:57:42.129: INFO: Waiting for amount of service:node-port-service endpoints to be 3
STEP: Waiting for Session Affinity service to expose endpoint 11/26/22 08:57:42.265
Nov 26 08:57:43.265: INFO: Waiting for amount of service:session-affinity-service endpoints to be 3
Nov 26 08:57:43.428: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz] Namespace:esipp-851 PodName:test-container-pod ContainerName:webserver Stdin:<nil> CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false}
Nov 26 08:57:43.428: INFO: >>> kubeConfig: /workspace/.kube/config
Nov 26 08:57:43.429: INFO: ExecWithOptions: Clientset creation
Nov 26 08:57:43.429: INFO: ExecWithOptions: execute(POST https://34.83.96.51/api/v1/namespaces/esipp-851/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+-o+%2Fdev%2Fnull+-w+%25%7Bhttp_code%7D+http%3A%2F%2F10.138.0.5%3A30415%2Fhealthz&container=webserver&container=webserver&stderr=true&stdout=true)
Nov 26 08:57:43.626: INFO: Got error reading status code from http://10.138.0.5:30415/healthz via test container: failed to execute "curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz": error dialing backend: No agent available, stderr: ""
------------------------------
Progress Report for Ginkgo Process #12
Automatically polling progress:
  [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 7m40.691s)
    test/e2e/network/loadbalancer.go:1266
    In [It] (Node Runtime: 7m40.017s)
      test/e2e/network/loadbalancer.go:1266
      At [By Step] Waiting for Session Affinity service to expose endpoint (Step Runtime: 1.684s)
        test/e2e/framework/network/utils.go:835

Spec Goroutine
goroutine 2660 [select]
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc001ac7dd0, 0x2fdb16a?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x68?, 0x2fd9d05?, 0x38?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0xc0004f78a8?, 0xc0004f78b8?, 0x262a967?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x5a?, 0x0?, 0x1f91?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
> k8s.io/kubernetes/test/e2e/network.testHTTPHealthCheckNodePortFromTestContainer(0xc000b7eb60, {0xc0012788a0, 0xa}, 0x76cf, 0x0?, 0x0, 0x2)
  test/e2e/network/service.go:705
> k8s.io/kubernetes/test/e2e/network.glob..func20.3.1()
  test/e2e/network/loadbalancer.go:1287
panic({0x70eb7e0, 0xc0000f8620})
  /usr/local/go/src/runtime/panic.go:884
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2.Fail({0xc002a74080, 0x38}, {0xc003e0bd58?, 0x75b521a?, 0xc003e0bd78?})
  vendor/github.com/onsi/ginkgo/v2/core_dsl.go:352
k8s.io/kubernetes/test/e2e/framework.Fail({0x76740e9, 0x23}, {0xc003e0bdf0?, 0x76740e9?, 0xc003e0be18?})
  test/e2e/framework/log.go:61
k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fa3ee0, 0xc000205d60}, {0x0?, 0x75c4cc0?, 0x9?})
  test/e2e/framework/expect.go:76
k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...)
  test/e2e/framework/expect.go:43
> k8s.io/kubernetes/test/e2e/network.glob..func20.3()
  test/e2e/network/loadbalancer.go:1305
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 08:57:44.678: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz] Namespace:esipp-851 PodName:test-container-pod ContainerName:webserver Stdin:<nil> CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false}
Nov 26 08:57:44.678: INFO: >>> kubeConfig: /workspace/.kube/config
Nov 26 08:57:44.679: INFO: ExecWithOptions: Clientset creation
Nov 26 08:57:44.679: INFO: ExecWithOptions: execute(POST https://34.83.96.51/api/v1/namespaces/esipp-851/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+-o+%2Fdev%2Fnull+-w+%25%7Bhttp_code%7D+http%3A%2F%2F10.138.0.5%3A30415%2Fhealthz&container=webserver&container=webserver&stderr=true&stdout=true)
Nov 26 08:57:44.892: INFO: Got error reading status code from http://10.138.0.5:30415/healthz via test container: failed to execute "curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz": error dialing backend: No agent available, stderr: ""
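Each ExecWithOptions entry above corresponds to a POST against the pod's exec subresource, as the execute(POST .../pods/test-container-pod/exec?...) URLs show. The following is a hedged sketch of that mechanism using client-go's remotecommand package; it is not the framework's own ExecWithOptions implementation, execHealthCheck is an invented name, and StreamWithContext assumes a client-go version recent enough to provide it:

    // Illustrative sketch: run a shell command in a pod via the exec
    // subresource, the same mechanism behind the POST .../exec URLs above.
    package sketch

    import (
        "bytes"
        "context"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/kubernetes/scheme"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/remotecommand"
    )

    func execHealthCheck(cfg *rest.Config, c kubernetes.Interface, ns, pod, container string, cmd []string) (string, string, error) {
        // Build the exec subresource request (POST /api/v1/namespaces/{ns}/pods/{pod}/exec).
        req := c.CoreV1().RESTClient().Post().
            Resource("pods").Namespace(ns).Name(pod).SubResource("exec").
            VersionedParams(&corev1.PodExecOptions{
                Container: container,
                Command:   cmd,
                Stdout:    true,
                Stderr:    true,
            }, scheme.ParameterCodec)

        exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
        if err != nil {
            return "", "", err
        }
        var stdout, stderr bytes.Buffer
        err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr})
        return stdout.String(), stderr.String(), err
    }

A caller would pass the same command the log shows, e.g. []string{"/bin/sh", "-c", "curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz"}. The "error dialing backend: No agent available" failures above happen on the apiserver-to-kubelet leg of this tunnel (via Konnectivity), before the command ever runs in the container.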
------------------------------
Progress Report for Ginkgo Process #12
Automatically polling progress:
  [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 8m20.697s)
    test/e2e/network/loadbalancer.go:1266
    In [It] (Node Runtime: 8m20.022s)
      test/e2e/network/loadbalancer.go:1266
      At [By Step] Waiting for Session Affinity service to expose endpoint (Step Runtime: 41.689s)
        test/e2e/framework/network/utils.go:835

Spec Goroutine
goroutine 2660 [select]
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc001ac7dd0, 0x2fdb16a?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x68?, 0x2fd9d05?, 0x38?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0xc0004f78a8?, 0xc0004f78b8?, 0x262a967?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x5a?, 0x0?, 0x1f91?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
> k8s.io/kubernetes/test/e2e/network.testHTTPHealthCheckNodePortFromTestContainer(0xc000b7eb60, {0xc0012788a0, 0xa}, 0x76cf, 0x0?, 0x0, 0x2)
  test/e2e/network/service.go:705
> k8s.io/kubernetes/test/e2e/network.glob..func20.3.1()
  test/e2e/network/loadbalancer.go:1287
panic({0x70eb7e0, 0xc0000f8620})
  /usr/local/go/src/runtime/panic.go:884
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2.Fail({0xc002a74080, 0x38}, {0xc003e0bd58?, 0x75b521a?, 0xc003e0bd78?})
  vendor/github.com/onsi/ginkgo/v2/core_dsl.go:352
k8s.io/kubernetes/test/e2e/framework.Fail({0x76740e9, 0x23}, {0xc003e0bdf0?, 0x76740e9?, 0xc003e0be18?})
  test/e2e/framework/log.go:61
k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fa3ee0, 0xc000205d60}, {0x0?, 0x75c4cc0?, 0x9?})
  test/e2e/framework/expect.go:76
k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...)
  test/e2e/framework/expect.go:43
> k8s.io/kubernetes/test/e2e/network.glob..func20.3()
  test/e2e/network/loadbalancer.go:1305
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001b38600})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
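The goroutine stack in these progress reports shows the health-check probe wrapped in wait.PollImmediate, the same retry primitive behind the once-per-second exec attempts and the endpoint waits earlier. A minimal sketch of that retry shape follows; it is illustrative only, and the real loop lives in testHTTPHealthCheckNodePortFromTestContainer at test/e2e/network/service.go:705:

    // Sketch of the retry pattern in the stack above: keep probing until the
    // expected HTTP status code comes back or the timeout expires.
    package sketch

    import (
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func pollForHTTPCode(probe func() (int, error), want int, timeout time.Duration) error {
        return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
            code, err := probe()
            if err != nil {
                // Transient errors such as "error dialing backend: No agent
                // available" are swallowed so the next tick retries.
                return false, nil
            }
            return code == want, nil
        })
    }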
from http://10.138.0.5:30415/healthz via test container: failed to execute "curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz": error dialing backend: No agent available, stderr: "" Nov 26 08:58:34.702: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz] Namespace:esipp-851 PodName:test-container-pod ContainerName:webserver Stdin:<nil> CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} Nov 26 08:58:34.702: INFO: >>> kubeConfig: /workspace/.kube/config Nov 26 08:58:34.704: INFO: ExecWithOptions: Clientset creation Nov 26 08:58:34.704: INFO: ExecWithOptions: execute(POST https://34.83.96.51/api/v1/namespaces/esipp-851/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+-o+%2Fdev%2Fnull+-w+%25%7Bhttp_code%7D+http%3A%2F%2F10.138.0.5%3A30415%2Fhealthz&container=webserver&container=webserver&stderr=true&stdout=true) Nov 26 08:58:34.895: INFO: Got error reading status code from http://10.138.0.5:30415/healthz via test container: failed to execute "curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz": error dialing backend: No agent available, stderr: "" Nov 26 08:58:35.692: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz] Namespace:esipp-851 PodName:test-container-pod ContainerName:webserver Stdin:<nil> CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} Nov 26 08:58:35.692: INFO: >>> kubeConfig: /workspace/.kube/config Nov 26 08:58:35.693: INFO: ExecWithOptions: Clientset creation Nov 26 08:58:35.693: INFO: ExecWithOptions: execute(POST https://34.83.96.51/api/v1/namespaces/esipp-851/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+-o+%2Fdev%2Fnull+-w+%25%7Bhttp_code%7D+http%3A%2F%2F10.138.0.5%3A30415%2Fhealthz&container=webserver&container=webserver&stderr=true&stdout=true) Nov 26 08:58:35.858: INFO: Got error reading status code from http://10.138.0.5:30415/healthz via test container: failed to execute "curl -g -q -s -o /dev/null -w %{http_code} http://10.138.0.5:30415/healthz": error dialing backend: No agent available, stderr: "" Nov 26 08:58:36.667: INFO: Unexpected error: failed to get pod test-container-pod: <*url.Error | 0xc0046ffe30>: { Op: "Get", URL: "https://34.83.96.51/api/v1/namespaces/esipp-851/pods/test-container-pod", Err: <*net.OpError | 0xc0041798b0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc004e6c060>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 83, 96, 51], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc0048c3b20>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 08:58:36.667: FAIL: failed to get pod test-container-pod: Get "https://34.83.96.51/api/v1/namespaces/esipp-851/pods/test-container-pod": dial tcp 34.83.96.51:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/pod.execCommandInPodWithFullOutput(0x771f0dc?, {0xc001ac69f0, 0x12}, {0xc004de3da0, 0x3, 0x3}) test/e2e/framework/pod/exec_util.go:126 +0x133 k8s.io/kubernetes/test/e2e/framework/pod.ExecShellInPodWithFullOutput(...) test/e2e/framework/pod/exec_util.go:138 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).GetHTTPCodeFromTestContainer(0xc000b7eb60, {0x75c17ef, 0x8}, {0xc0012788a0?, 0x1?}, 0x3?) 
	test/e2e/framework/network/utils.go:420 +0x1a5
k8s.io/kubernetes/test/e2e/network.testHTTPHealthCheckNodePortFromTestContainer.func1()
	test/e2e/network/service.go:689 +0x72
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1b
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc000136000?}, 0xc00162de80?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 +0x57
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc001ac7dd0, 0x2fdb16a?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 +0x10c
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x68?, 0x2fd9d05?, 0x38?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 +0x9a
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0xc0004f78a8?, 0xc0004f78b8?, 0x262a967?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 +0x4a
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x5a?, 0x0?, 0x1f91?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 +0x50
k8s.io/kubernetes/test/e2e/network.testHTTPHealthCheckNodePortFromTestContainer(0xc000b7eb60, {0xc0012788a0, 0xa}, 0x76cf, 0x0?, 0x0, 0x2)
	test/e2e/network/service.go:705 +0x139
k8s.io/kubernetes/test/e2e/network.glob..func20.3.1()
	test/e2e/network/loadbalancer.go:1287 +0x188
panic({0x70eb7e0, 0xc0000f8620})
	/usr/local/go/src/runtime/panic.go:884 +0x212
k8s.io/kubernetes/test/e2e/framework.Fail({0x76740e9, 0x23}, {0xc003e0bdf0?, 0x76740e9?, 0xc003e0be18?})
	test/e2e/framework/log.go:61 +0x145
k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fa3ee0, 0xc000205d60}, {0x0?, 0x75c4cc0?, 0x9?})
	test/e2e/framework/expect.go:76 +0x267
k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...)
	test/e2e/framework/expect.go:43
k8s.io/kubernetes/test/e2e/network.glob..func20.3()
	test/e2e/network/loadbalancer.go:1305 +0x288
E1126 08:58:36.668056 8155 runtime.go:79] Observed a panic: types.GinkgoError{Heading:"Your Test Panicked", DocLink:"mental-model-how-ginkgo-handles-failure", CodeLocation: test/e2e/framework/pod/exec_util.go:126, CustomMessage:""}
Your Test Panicked -- test/e2e/framework/pod/exec_util.go:126
When you, or your assertion library, calls Ginkgo's Fail(), Ginkgo panics to prevent subsequent assertions from running. Normally Ginkgo rescues this panic so you shouldn't see it. However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. To circumvent this, you should call
	defer GinkgoRecover()
at the top of the goroutine that caused this panic. Alternatively, you may have made an assertion outside of a Ginkgo leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).
Learn more at: http://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
(The GinkgoError's embedded Message and FullStackTrace fields repeated the message and the Full Stack Trace above verbatim; its ANSI color codes have been stripped.)
goroutine 2660 [running]:
(The goroutine stack retraces the same failure path as the Full Stack Trace above, wrapped by k8s.io/apimachinery/pkg/util/runtime.logPanic/HandleCrash and ginkgo/v2.Fail, ending at internal.(*Suite).runNode; created by (*Suite).runNode at vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 +0xe3d.)
[AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32
Nov 26 08:58:36.668: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260
Nov 26 08:58:36.708: INFO: Output of kubectl describe svc:
Nov 26 08:58:36.708: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-851 describe svc --namespace=esipp-851'
Nov 26 08:58:36.820: INFO: rc: 1
Nov 26 08:58:36.820: INFO:
[DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196
STEP: dump namespace information after failure 11/26/22 08:58:36.82
STEP: Collecting events from namespace "esipp-851". 11/26/22 08:58:36.82
Nov 26 08:58:36.859: INFO: Unexpected error: failed to list events in namespace "esipp-851": Get "https://34.83.96.51/api/v1/namespaces/esipp-851/events": dial tcp 34.83.96.51:443: connect: connection refused
Nov 26 08:58:36.860: FAIL: failed to list events in namespace "esipp-851": Get "https://34.83.96.51/api/v1/namespaces/esipp-851/events": dial tcp 34.83.96.51:443: connect: connection refused
Full Stack Trace
k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc0004f65c0, {0xc000df1780, 0x9})
	test/e2e/framework/debug/dump.go:44 +0x191
k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc001b7eb60}, {0xc000df1780, 0x9})
	test/e2e/framework/debug/dump.go:62 +0x8d
k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc0004f6650?, {0xc000df1780?, 0x7fa7740?})
	test/e2e/framework/debug/init/init.go:34 +0x32
k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1()
	test/e2e/framework/framework.go:274 +0x6d
k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc0012e2000)
	test/e2e/framework/framework.go:271 +0x179
reflect.Value.call({0x6627cc0?, 0xc005387d50?, 0x0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?})
	/usr/local/go/src/reflect/value.go:584 +0x8c5
reflect.Value.Call({0x6627cc0?, 0xc005387d50?, 0x0?}, {0xae73300?, 0x0?, 0x0?})
	/usr/local/go/src/reflect/value.go:368 +0xbc
[DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193
STEP: Destroying namespace "esipp-851" for this suite. 11/26/22 08:58:36.86
Nov 26 08:58:36.900: FAIL: Couldn't delete ns: "esipp-851": Delete "https://34.83.96.51/api/v1/namespaces/esipp-851": dial tcp 34.83.96.51:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.83.96.51/api/v1/namespaces/esipp-851", Err:(*net.OpError)(0xc0028594a0)})
Full Stack Trace
k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1()
	test/e2e/framework/framework.go:370 +0x4fe
k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc0012e2000)
	test/e2e/framework/framework.go:383 +0x1ca
reflect.Value.call({0x6627cc0?, 0xc005387cd0?, 0xc0043c2fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?})
	/usr/local/go/src/reflect/value.go:584 +0x8c5
reflect.Value.Call({0x6627cc0?, 0xc005387cd0?, 0x0?}, {0xae73300?, 0x5?, 0xc004e0b990?})
	/usr/local/go/src/reflect/value.go:368 +0xbc
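The Full Stack Trace above shows the shape of this failure: testHTTPHealthCheckNodePortFromTestContainer drives GetHTTPCodeFromTestContainer through wait.PollImmediate, swallowing per-attempt exec errors so the probe retries once per second until the timeout. A minimal Go sketch of that pattern, with getHTTPCode as a hypothetical stand-in for the framework's exec-based probe (an illustrative reconstruction, not the verbatim service.go source):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// getHTTPCode is a hypothetical stand-in for the framework's
// GetHTTPCodeFromTestContainer, which execs `curl -w %{http_code}` inside the
// test pod; here it always fails the way the log above does.
func getHTTPCode(host string, port int) (int, error) {
	return 0, fmt.Errorf("error dialing backend: No agent available")
}

// pollHealthCheckNodePort retries once per second until the expected status
// code is seen or the timeout expires, mirroring the wait.PollImmediate
// frames in the stack trace above.
func pollHealthCheckNodePort(host string, port, want int, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		code, err := getHTTPCode(host, port)
		if err != nil {
			return false, nil // transient exec/dial errors: retry, don't abort
		}
		return code == want, nil
	})
}

func main() {
	err := pollHealthCheckNodePort("10.138.0.5", 30415, 200, 5*time.Second)
	fmt.Println(err) // "timed out waiting for the condition"
}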
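The "Your Test Panicked" message above also spells out the remedy for assertions made off the main test goroutine: defer GinkgoRecover() at the top of that goroutine. A minimal self-contained sketch of the pattern it describes (an illustrative example, not code from the failing suite):

package e2e_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "recover example")
}

var _ = It("asserts from a goroutine without panicking the suite", func() {
	done := make(chan struct{})
	go func() {
		// Without GinkgoRecover, a failing assertion here panics in a
		// goroutine Ginkgo cannot rescue, producing exactly the
		// "Your Test Panicked" error shown above.
		defer GinkgoRecover()
		defer close(done)
		Expect(1 + 1).To(Equal(2))
	}()
	<-done
})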
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\swork\sfor\stype\=NodePort$'
test/e2e/framework/framework.go:241
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000a4bc20)
	test/e2e/framework/framework.go:241 +0x96f
There were additional failures detected after the initial failure:
[PANICKED] Test Panicked
In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260
runtime error: invalid memory address or nil pointer dereference
Full Stack Trace
k8s.io/kubernetes/test/e2e/network.glob..func20.2()
	test/e2e/network/loadbalancer.go:1262 +0x113
(from junit_01.xml)
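The [PANICKED] AfterEach above is a nil pointer dereference at loadbalancer.go:1262, the classic symptom of a cleanup block dereferencing state that setup never initialized: BeforeEach died on "connection refused" before the spec body ran, so whatever the AfterEach touches was presumably never set. A hedged sketch of the usual defensive pattern; the variable names are hypothetical, not read from loadbalancer.go:

package e2e_test

import (
	. "github.com/onsi/ginkgo/v2"
	"k8s.io/client-go/kubernetes"
)

// Hypothetical per-spec state that a BeforeEach would normally populate.
var (
	cs             kubernetes.Interface
	serviceLBNames []string
)

var _ = AfterEach(func() {
	// If setup failed before initializing the client, there is nothing to
	// clean up; dereferencing cs unconditionally is what panics with
	// "invalid memory address or nil pointer dereference".
	if cs == nil {
		return
	}
	for _, name := range serviceLBNames {
		// Hypothetical cleanup of each load-balancer service would go here.
		GinkgoWriter.Printf("cleaning up load balancer %s\n", name)
	}
})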
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178
STEP: Creating a kubernetes client 11/26/22 08:52:43.841
Nov 26 08:52:43.841: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename esipp 11/26/22 08:52:43.842
Nov 26 08:52:43.882: INFO: Unexpected error while creating namespace: Post "https://34.83.96.51/api/v1/namespaces": dial tcp 34.83.96.51:443: connect: connection refused
(The same namespace POST was retried every 2s, from 08:52:45.922 through 08:53:13.961, each attempt failing with the same "connection refused" error.)
Nov 26 08:53:13.961: INFO: Unexpected error: <*errors.errorString | 0xc0002419e0>: { s: "timed out waiting for the condition", }
Nov 26 08:53:13.961: FAIL: timed out waiting for the condition
Full Stack Trace
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000a4bc20)
	test/e2e/framework/framework.go:241 +0x96f
[AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32
Nov 26 08:53:13.961: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260
[DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196
STEP: dump namespace information after failure 11/26/22 08:53:14
[DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193
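The BeforeEach loop above is the framework's namespace-creation retry: POST /api/v1/namespaces roughly every 2 seconds until it succeeds or the budget expires with "timed out waiting for the condition". A hedged client-go reconstruction of that loop; the 2s/30s interval and timeout are inferred from the log timestamps, and GenerateName stands in for however the framework actually derives the "esipp-NNNN" suffix:

package e2eutil

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// createTestNamespace retries the namespace POST the way the log above shows:
// every 2s until success or the budget ends in wait.ErrWaitTimeout, which
// prints as "timed out waiting for the condition".
func createTestNamespace(c kubernetes.Interface, baseName string) (*v1.Namespace, error) {
	var got *v1.Namespace
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		ns, err := c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{
			ObjectMeta: metav1.ObjectMeta{GenerateName: baseName + "-"},
		}, metav1.CreateOptions{})
		if err != nil {
			// e.g. dial tcp 34.83.96.51:443: connect: connection refused;
			// swallow and retry, as the repeated INFO lines above show.
			return false, nil
		}
		got = ns
		return true, nil
	})
	return got, err
}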
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\swork\sfrom\spods$'
test/e2e/network/loadbalancer.go:1476
k8s.io/kubernetes/test/e2e/network.glob..func20.6()
	test/e2e/network/loadbalancer.go:1476 +0xabd
(from junit_01.xml)
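The spec log below starts by building the service under test: type=LoadBalancer with ExternalTrafficPolicy=Local, the ESIPP mode that keeps traffic on the receiving node so backend pods see the real client IP (which the /clientip probe retried further down then verifies). A minimal client-go sketch of that setup; the selector and target port are assumptions, only the service name and policy come from the STEP lines:

package e2eutil

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
)

// createExternalLocalService mirrors the STEP lines below: a LoadBalancer
// Service named external-local-pods with ExternalTrafficPolicy=Local.
func createExternalLocalService(c kubernetes.Interface, namespace string) (*v1.Service, error) {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "external-local-pods"},
		Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeLoadBalancer,
			// Local delivers traffic only to backends on the node that
			// received it, skipping the SNAT hop, so pods see the caller's
			// real source IP (what /clientip checks).
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
			Selector:              map[string]string{"app": "external-local-pods"},
			Ports: []v1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(8080),
			}},
		},
	}
	return c.CoreV1().Services(namespace).Create(context.TODO(), svc, metav1.CreateOptions{})
}

The retry loop in the log then shells out to kubectl exec and curls the load balancer's /clientip endpoint from a pause pod. A standalone sketch of that probe, assuming kubectl is on PATH rather than at the /workspace/... path the harness uses; server, kubeconfig, namespace, pod name, and the 5m budget are taken from the log lines:

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	args := []string{
		"--server=https://34.83.96.51",
		"--kubeconfig=/workspace/.kube/config",
		"--namespace=esipp-3250",
		"exec", "pause-pod-deployment-7c665f9d5d-vbtlb", "--",
		"/bin/sh", "-x", "-c",
		"curl -q -s --connect-timeout 30 35.197.57.135:80/clientip",
	}
	deadline := time.Now().Add(5 * time.Minute) // "Waiting up to 5m0s curl ..."
	for time.Now().Before(deadline) {
		out, err := exec.Command("kubectl", args...).CombinedOutput()
		if err == nil {
			// With ExternalTrafficPolicy=Local the body is the caller's
			// real source IP and port.
			fmt.Printf("clientip: %s\n", out)
			return
		}
		fmt.Printf("rc: 1, got err: %v, retry until timeout\n", err)
		time.Sleep(2 * time.Second)
	}
	fmt.Println("timed out waiting for the condition")
}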
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178
STEP: Creating a kubernetes client 11/26/22 08:51:21.992
Nov 26 08:51:21.992: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename esipp 11/26/22 08:51:21.994
STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 08:51:22.755
STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 08:51:22.884
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:31
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1250
[It] should work from pods test/e2e/network/loadbalancer.go:1422
STEP: creating a service esipp-3250/external-local-pods with type=LoadBalancer 11/26/22 08:51:23.141
STEP: setting ExternalTrafficPolicy=Local 11/26/22 08:51:23.141
STEP: waiting for loadbalancer for service esipp-3250/external-local-pods 11/26/22 08:51:23.3
Nov 26 08:51:23.301: INFO: Waiting up to 15m0s for service "external-local-pods" to have a LoadBalancer
STEP: creating a pod to be part of the service external-local-pods 11/26/22 08:51:57.488
Nov 26 08:51:57.581: INFO: Waiting up to 2m0s for 1 pods to be created
Nov 26 08:51:57.659: INFO: Found 0/1 pods - will retry
Nov 26 08:51:59.729: INFO: Found all 1 pods
Nov 26 08:51:59.729: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [external-local-pods-7f2ql]
Nov 26 08:51:59.729: INFO: Waiting up to 2m0s for pod "external-local-pods-7f2ql" in namespace "esipp-3250" to be "running and ready"
Nov 26 08:51:59.863: INFO: Pod "external-local-pods-7f2ql": Phase="Running", Reason="", readiness=true. Elapsed: 133.282907ms
Nov 26 08:51:59.863: INFO: Pod "external-local-pods-7f2ql" satisfied condition "running and ready"
Nov 26 08:51:59.863: INFO: Wanted all 1 pods to be running and ready. Result: true.
Pods: [external-local-pods-7f2ql] STEP: waiting for loadbalancer for service esipp-3250/external-local-pods 11/26/22 08:51:59.863 Nov 26 08:51:59.863: INFO: Waiting up to 15m0s for service "external-local-pods" to have a LoadBalancer STEP: Creating pause pod deployment to make sure, pausePods are in desired state 11/26/22 08:52:00.009 Nov 26 08:52:00.273: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:0, Replicas:0, UpdatedReplicas:0, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:0, Conditions:[]v1.DeploymentCondition(nil), CollisionCount:(*int32)(nil)} Nov 26 08:52:02.480: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2022, time.November, 26, 8, 52, 0, 0, time.Local), LastTransitionTime:time.Date(2022, time.November, 26, 8, 52, 0, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2022, time.November, 26, 8, 52, 0, 0, time.Local), LastTransitionTime:time.Date(2022, time.November, 26, 8, 52, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"pause-pod-deployment-7c665f9d5d\" is progressing."}}, CollisionCount:(*int32)(nil)} Nov 26 08:52:04.457: INFO: Waiting up to 5m0s curl 35.197.57.135:80/clientip STEP: Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx 11/26/22 08:52:04.51 Nov 26 08:52:04.510: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:04.915: INFO: rc: 1 Nov 26 08:52:04.916: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:06.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:07.548: INFO: rc: 1 Nov 26 08:52:07.548: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:08.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config 
--namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:09.370: INFO: rc: 1 Nov 26 08:52:09.370: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:10.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:11.464: INFO: rc: 1 Nov 26 08:52:11.464: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:12.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:13.344: INFO: rc: 1 Nov 26 08:52:13.344: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:14.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:15.371: INFO: rc: 1 Nov 26 08:52:15.371: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:16.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:17.570: INFO: rc: 1 Nov 26 08:52:17.570: INFO: got err: error running 
/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:18.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:19.353: INFO: rc: 1 Nov 26 08:52:19.353: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:20.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:21.388: INFO: rc: 1 Nov 26 08:52:21.388: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:22.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:23.682: INFO: rc: 1 Nov 26 08:52:23.682: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:24.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:25.387: INFO: rc: 1 Nov 26 08:52:25.387: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- 
/bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:26.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:27.402: INFO: rc: 1 Nov 26 08:52:27.402: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:28.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:29.465: INFO: rc: 1 Nov 26 08:52:29.465: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:30.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:31.522: INFO: rc: 1 Nov 26 08:52:31.522: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:32.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:33.340: INFO: rc: 1 Nov 26 08:52:33.340: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:34.916: INFO: 
Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:35.443: INFO: rc: 1 Nov 26 08:52:35.443: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:36.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:37.472: INFO: rc: 1 Nov 26 08:52:37.472: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:38.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:39.382: INFO: rc: 1 Nov 26 08:52:39.382: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:40.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:41.445: INFO: rc: 1 Nov 26 08:52:41.445: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:52:42.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb 
-- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:43.047: INFO: rc: 1 Nov 26 08:52:43.047: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:52:44.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:45.041: INFO: rc: 1 Nov 26 08:52:45.041: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:52:46.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:47.045: INFO: rc: 1 Nov 26 08:52:47.046: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:52:48.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:49.027: INFO: rc: 1 Nov 26 08:52:49.027: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? 
error: exit status 1, retry until timeout Nov 26 08:52:50.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:51.024: INFO: rc: 1 Nov 26 08:52:51.024: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:52:52.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:53.026: INFO: rc: 1 Nov 26 08:52:53.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:52:54.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:55.028: INFO: rc: 1 Nov 26 08:52:55.028: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:52:56.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:57.026: INFO: rc: 1 Nov 26 08:52:57.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? 
error: exit status 1, retry until timeout Nov 26 08:52:58.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:52:59.034: INFO: rc: 1 Nov 26 08:52:59.034: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:53:00.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:53:01.025: INFO: rc: 1 Nov 26 08:53:01.025: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:53:02.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:53:03.023: INFO: rc: 1 Nov 26 08:53:03.023: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:53:04.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:53:05.027: INFO: rc: 1 Nov 26 08:53:05.027: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? 
error: exit status 1, retry until timeout
Nov 26 08:53:06.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:53:07.026: INFO: rc: 1
Nov 26 08:53:07.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip:
Command stdout:

stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port?
error: exit status 1, retry until timeout
[the same kubectl exec/curl attempt was retried every ~2s from 08:53:08.916 through 08:53:29.027, each failing with rc: 1 and the same "The connection to the server 34.83.96.51 was refused - did you specify the right host or port?" stderr]
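Each iteration above has the same shape: run kubectl exec against the pause pod, curl the LoadBalancer's /clientip endpoint from inside it, and retry on any non-zero exit until an overall deadline. A minimal Go sketch of that poll-until-timeout pattern, with illustrative constants rather than the framework's real helpers and deadlines:

// Sketch of the retry loop that produces the log entries above.
// The interval and timeout values are assumptions for illustration.
package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	const (
		interval = 2 * time.Second
		timeout  = 15 * time.Minute // assumed; the real test has its own deadline
	)
	args := []string{
		"--server=https://34.83.96.51",
		"--kubeconfig=/workspace/.kube/config",
		"--namespace=esipp-3250",
		"exec", "pause-pod-deployment-7c665f9d5d-vbtlb", "--",
		"/bin/sh", "-x", "-c", "curl -q -s --connect-timeout 30 35.197.57.135:80/clientip",
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		out, err := exec.Command("kubectl", args...).CombinedOutput()
		if err == nil {
			// Success: the service echoed the client IP back.
			fmt.Printf("clientip response: %s\n", out)
			return
		}
		fmt.Printf("got err: %v, retry until timeout\n", err)
		time.Sleep(interval)
	}
	fmt.Println("timed out waiting for /clientip")
}

The test only needs some attempt to eventually succeed and return the expected client IP; every failed attempt is logged in the form seen above.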
[the same attempt was retried every ~2s from 08:53:30.916 through 08:54:39.248; the attempts that completed at 08:53:35.024, 08:54:37.655, and 08:54:39.248 failed with rc: 1 and "Error from server: error dialing backend: No agent available", while every other attempt failed with rc: 1 and the same "The connection to the server 34.83.96.51 was refused - did you specify the right host or port?" stderr]
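Both failure modes so far surface inside kubectl exec because exec is an apiserver-mediated stream: the client opens POST …/pods/{pod}/exec on the apiserver, which then dials the kubelet, on this cluster through a konnectivity-style network proxy. "connection refused" means the apiserver itself is not answering on 34.83.96.51:443; "No agent available" means it answered but had no proxy agent through which to reach the node. A client-go sketch of that same exec path (a sketch under assumed paths and names, not framework code):

// Sketch: the apiserver-mediated exec path that these retries exercise.
package main

import (
	"context"
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The exec request hits the apiserver, which then dials the kubelet
	// (possibly via a konnectivity agent). Both "connection refused" and
	// "No agent available" are failures of this hop, not of the pod.
	req := cs.CoreV1().RESTClient().Post().
		Resource("pods").Namespace("esipp-3250").
		Name("pause-pod-deployment-7c665f9d5d-vbtlb").
		SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Command: []string{"/bin/sh", "-c", "curl -q -s --connect-timeout 30 35.197.57.135:80/clientip"},
			Stdout:  true,
			Stderr:  true,
		}, scheme.ParameterCodec)
	exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
	if err != nil {
		panic(err)
	}
	err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
		Stdout: os.Stdout, Stderr: os.Stderr,
	})
	fmt.Println("stream err:", err)
}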
Nov 26 08:54:40.916: INFO: Running [the same kubectl exec/curl command as above]
Nov 26 08:54:41.250: INFO: rc: 1
Nov 26 08:54:41.250: INFO: got err: error running [the same kubectl exec/curl command]:
Command stdout:

stderr: Error from server: error dialing backend: No agent available
error: exit status 1, retry until timeout
Nov 26 08:54:42.916: INFO: Running [the same kubectl exec/curl command]
Nov 26 08:54:43.814: INFO: rc: 7
Nov 26 08:54:43.814: INFO: got err: error running [the same kubectl exec/curl command]:
Command stdout:

stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip
command terminated with exit code 7
error: exit status 7, retry until timeout
[identical rc: 7 "command terminated with exit code 7" failures repeated every ~2s from 08:54:44.916 through 08:55:09.453]
Nov 26 08:55:10.916: INFO: Running [the same kubectl exec/curl command]
Nov 26 08:55:11.274: INFO: rc: 1
Nov 26 08:55:11.274: INFO: got err: error running [the same kubectl exec/curl command]:
Command stdout:

stderr: error: unable to upgrade connection: container not found ("agnhost-pause")
error: exit status 1, retry until timeout
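From 08:54:43 the retries change character: rc moves from 1 to 7. kubectl exec propagates the in-container command's exit status, and curl's exit code 7 (could not connect) means the exec plumbing now works but nothing is accepting connections on 35.197.57.135:80. A sketch of recovering that remote exit code in Go (command arguments illustrative):

// Sketch: distinguishing "kubectl itself failed" from "the remote command
// failed with code N". curl's exit code 7 surfaces as *exec.ExitError.
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("kubectl", "--namespace=esipp-3250",
		"exec", "pause-pod-deployment-7c665f9d5d-vbtlb", "--",
		"/bin/sh", "-c", "curl -q -s --connect-timeout 30 35.197.57.135:80/clientip")
	out, err := cmd.CombinedOutput()
	var ee *exec.ExitError
	switch {
	case err == nil:
		fmt.Printf("clientip: %s\n", out)
	case errors.As(err, &ee):
		// 7 is curl's CURLE_COULDNT_CONNECT: exec worked, the HTTP connect did not.
		fmt.Printf("remote command failed with exit code %d\n", ee.ExitCode())
	default:
		fmt.Println("could not run kubectl at all:", err)
	}
}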
[identical rc: 1 failures with stderr "error: unable to upgrade connection: container not found (\"agnhost-pause\")" repeated every ~2s from 08:55:12.916 through 08:55:35.367]
Nov 26 08:55:36.916: INFO: Running [the same kubectl exec/curl command]
Nov 26 08:55:37.793: INFO: rc: 7
Nov 26 08:55:37.793: INFO: got err: error running [the same kubectl exec/curl command]:
Command stdout:

stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip
command terminated with exit code 7
error: exit status 7, retry until timeout
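Four distinct failure classes have now appeared in this single retry loop, each pointing at a different layer: the apiserver, the apiserver-to-kubelet tunnel, the target container ("container not found" while it restarts), and the service under test. A hypothetical triage helper mapping each stderr pattern to its layer (category names are illustrative, not from the e2e framework):

// Hypothetical helper for triaging failed kubectl exec attempts by stderr.
package main

import (
	"fmt"
	"strings"
)

func classify(stderr string) string {
	switch {
	case strings.Contains(stderr, "connection to the server") && strings.Contains(stderr, "refused"):
		return "apiserver unreachable" // kube-apiserver down or restarting
	case strings.Contains(stderr, "No agent available"):
		return "proxy agent unavailable" // apiserver up, cannot tunnel to the kubelet
	case strings.Contains(stderr, "container not found"):
		return "container restarting" // exec reached the kubelet, target container gone
	case strings.Contains(stderr, "exit code 7"):
		return "curl connect failure" // exec succeeded; service IP not accepting connections
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(classify(`The connection to the server 34.83.96.51 was refused - did you specify the right host or port?`))
	fmt.Println(classify(`Error from server: error dialing backend: No agent available`))
	fmt.Println(classify(`error: unable to upgrade connection: container not found ("agnhost-pause")`))
	fmt.Println(classify(`command terminated with exit code 7`))
}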
Nov 26 08:55:38.916: INFO: Running [the same kubectl exec/curl command]
[identical rc: 7 "command terminated with exit code 7" failures repeated every ~2s from 08:55:39.926 through 08:55:53.670]
error: exit status 7, retry until timeout
Nov 26 08:55:54.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl
--server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:55:55.484: INFO: rc: 7 Nov 26 08:55:55.484: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:55:56.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:55:57.514: INFO: rc: 7 Nov 26 08:55:57.514: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:55:58.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:55:59.473: INFO: rc: 7 Nov 26 08:55:59.473: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:00.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:01.517: INFO: rc: 7 Nov 26 08:56:01.517: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:02.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config 
--namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:03.573: INFO: rc: 7 Nov 26 08:56:03.573: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:04.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:05.430: INFO: rc: 7 Nov 26 08:56:05.430: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:06.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:07.819: INFO: rc: 7 Nov 26 08:56:07.819: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:08.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:09.658: INFO: rc: 7 Nov 26 08:56:09.658: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:10.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c 
curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:11.577: INFO: rc: 7 Nov 26 08:56:11.577: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:12.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:13.695: INFO: rc: 7 Nov 26 08:56:13.695: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:14.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:15.562: INFO: rc: 7 Nov 26 08:56:15.563: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:16.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:17.740: INFO: rc: 7 Nov 26 08:56:17.740: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout Nov 26 08:56:18.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:56:19.572: INFO: 
Nov 26 08:56:19.572: INFO: rc: 7
Nov 26 08:56:19.572: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:20.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:21.587: INFO: rc: 7
Nov 26 08:56:21.587: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:22.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
------------------------------
Progress Report for Ginkgo Process #7
Automatically polling progress:
  [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 5m1.149s)
    test/e2e/network/loadbalancer.go:1422
    In [It] (Node Runtime: 5m0s)
      test/e2e/network/loadbalancer.go:1422
      At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 4m18.632s)
        test/e2e/network/loadbalancer.go:1466

Spec Goroutine
goroutine 1860 [select]
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc000aebe40?, 0x0?})
  test/e2e/framework/kubectl/builder.go:125
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...)
  test/e2e/framework/kubectl/builder.go:107
k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc000e79130?, 0x1?}, {0xc0006f7ad8?, 0x101010020?, 0x0?})
  test/e2e/framework/kubectl/builder.go:154
k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...)
  test/e2e/framework/pod/output/output.go:82
> k8s.io/kubernetes/test/e2e/network.glob..func20.6.3()
  test/e2e/network/loadbalancer.go:1468
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0x2?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
> k8s.io/kubernetes/test/e2e/network.glob..func20.6()
  test/e2e/network/loadbalancer.go:1467
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 08:56:23.646: INFO: rc: 7
Nov 26 08:56:23.646: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:24.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:25.541: INFO: rc: 7
Nov 26 08:56:25.541: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
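The goroutine dump above shows where the spec is parked: the It body at loadbalancer.go:1467 drives wait.PollImmediate, and its condition function (loadbalancer.go:1468) shells out through RunHostCmd on every tick. A minimal sketch of that loop shape follows; it is a paraphrase, not the test's verbatim source, and the 2-second interval and 15-minute timeout are illustrative assumptions (the real constants live in the e2e framework and are not visible in this log):

    package network

    import (
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    	"k8s.io/kubernetes/test/e2e/framework"
    	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
    )

    // pollClientIPFromPod is a sketch of the retry loop implied by the stack
    // above: exec curl inside the pause pod until the LB answers or we time out.
    func pollClientIPFromPod(ns, pod, cmd string) (string, error) {
    	var stdout string
    	pollErr := wait.PollImmediate(2*time.Second, 15*time.Minute, func() (bool, error) {
    		var err error
    		stdout, err = e2eoutput.RunHostCmd(ns, pod, cmd)
    		if err != nil {
    			// Returning (false, nil) keeps polling; this is why each failed
    			// attempt is logged as "retry until timeout" instead of failing
    			// the spec immediately.
    			framework.Logf("got err: %v, retry until timeout", err)
    			return false, nil
    		}
    		return true, nil
    	})
    	return stdout, pollErr
    }

Because the condition swallows per-attempt errors, only the final PollImmediate timeout surfaces as the test failure; everything in between is the repeated record stream seen here.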
Nov 26 08:56:26.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:29.552: INFO: rc: 7
Nov 26 08:56:29.552: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:30.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:31.588: INFO: rc: 7
Nov 26 08:56:31.588: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:32.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:33.607: INFO: rc: 7
Nov 26 08:56:33.607: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:34.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:35.506: INFO: rc: 7
Nov 26 08:56:35.506: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:36.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:37.737: INFO: rc: 7
Nov 26 08:56:37.737: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:38.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:39.609: INFO: rc: 7
Nov 26 08:56:39.609: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:40.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:41.899: INFO: rc: 7
Nov 26 08:56:41.899: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:42.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
------------------------------
Progress Report for Ginkgo Process #7
Automatically polling progress:
  [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 5m21.152s)
    test/e2e/network/loadbalancer.go:1422
    In [It] (Node Runtime: 5m20.003s)
      test/e2e/network/loadbalancer.go:1422
      At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 4m38.634s)
        test/e2e/network/loadbalancer.go:1466

Spec Goroutine
goroutine 1860 [select]
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc000346840?, 0x0?})
  test/e2e/framework/kubectl/builder.go:125
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...)
  test/e2e/framework/kubectl/builder.go:107
k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc000e79130?, 0x1?}, {0xc0006f7ad8?, 0x101010020?, 0x0?})
  test/e2e/framework/kubectl/builder.go:154
k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...)
  test/e2e/framework/pod/output/output.go:82
> k8s.io/kubernetes/test/e2e/network.glob..func20.6.3()
  test/e2e/network/loadbalancer.go:1468
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0x2?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
> k8s.io/kubernetes/test/e2e/network.glob..func20.6()
  test/e2e/network/loadbalancer.go:1467
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 08:56:43.621: INFO: rc: 7
Nov 26 08:56:43.621: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:44.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:45.574: INFO: rc: 7
Nov 26 08:56:45.574: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
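Note the shift in failure mode back at 08:55:37: the first attempts died inside kubectl exec itself (rc 1, container not found), while since then the exec transport works and curl exits 7, curl's "failed to connect to host" code, meaning the connection attempt to 35.197.57.135:80 is being refused or dropped. A hypothetical triage helper, not part of the e2e framework, just to pin down what each code seen in this run implies:

    package main

    import "fmt"

    // classifyRC maps the exit codes observed in this log to their meaning.
    // The case for 28 is not seen above and is listed only for contrast.
    func classifyRC(rc int) string {
    	switch rc {
    	case 7:
    		return "curl could not connect to the LB (exec worked; TCP connect refused or filtered)"
    	case 1:
    		return "kubectl exec failed before curl ran (container or apiserver->kubelet tunnel unavailable)"
    	case 28:
    		return "curl hit --connect-timeout before the handshake completed"
    	default:
    		return "unrecognized exit code"
    	}
    }

    func main() {
    	for _, rc := range []int{1, 7} {
    		fmt.Printf("rc: %d -> %s\n", rc, classifyRC(rc))
    	}
    }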
Nov 26 08:56:46.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:47.702: INFO: rc: 7
Nov 26 08:56:47.702: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:48.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:49.691: INFO: rc: 7
Nov 26 08:56:49.691: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:50.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:51.717: INFO: rc: 7
Nov 26 08:56:51.717: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:52.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:53.628: INFO: rc: 7
Nov 26 08:56:53.628: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:54.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:55.573: INFO: rc: 7
Nov 26 08:56:55.573: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:56.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:57.817: INFO: rc: 7
Nov 26 08:56:57.817: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:56:58.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:56:59.682: INFO: rc: 7
Nov 26 08:56:59.682: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:57:00.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:01.593: INFO: rc: 7
Nov 26 08:57:01.593: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: + curl -q -s --connect-timeout 30 35.197.57.135:80/clientip command terminated with exit code 7 error: exit status 7, retry until timeout
Nov 26 08:57:02.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
------------------------------
Progress Report for Ginkgo Process #7
Automatically polling progress:
  [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 5m41.155s)
    test/e2e/network/loadbalancer.go:1422
    In [It] (Node Runtime: 5m40.006s)
      test/e2e/network/loadbalancer.go:1422
      At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 4m58.637s)
        test/e2e/network/loadbalancer.go:1466

Spec Goroutine
goroutine 1860 [select]
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc002d1c420?, 0x0?})
  test/e2e/framework/kubectl/builder.go:125
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...)
  test/e2e/framework/kubectl/builder.go:107
k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc000e79130?, 0x1?}, {0xc0006f7ad8?, 0x101010020?, 0x0?})
  test/e2e/framework/kubectl/builder.go:154
k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...)
  test/e2e/framework/pod/output/output.go:82
> k8s.io/kubernetes/test/e2e/network.glob..func20.6.3()
  test/e2e/network/loadbalancer.go:1468
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0x2?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
> k8s.io/kubernetes/test/e2e/network.glob..func20.6()
  test/e2e/network/loadbalancer.go:1467
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 08:57:03.367: INFO: rc: 1
Nov 26 08:57:03.367: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:04.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:05.305: INFO: rc: 1
Nov 26 08:57:05.305: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:06.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:07.389: INFO: rc: 1
Nov 26 08:57:07.389: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:08.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
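At 08:57:03 the failure mode changes again: the apiserver now answers "error dialing backend: No agent available". That message comes from the Konnectivity (apiserver-network-proxy) path the control plane uses to reach kubelets for exec and logs; "No agent available" means no agent tunnel is currently registered for the target node, so the exec never reaches the pod at all. A hedged client-go sketch for checking the agents; the kube-system namespace and the k8s-app=konnectivity-agent label match common kube-up/GCE manifests and are assumptions here:

    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Reuse the kubeconfig the harness itself is pointed at.
    	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
    	if err != nil {
    		panic(err)
    	}
    	cs := kubernetes.NewForConfigOrDie(cfg)
    	// If no konnectivity-agent pod on the node is Running, "No agent
    	// available" from exec/logs requests is the expected symptom.
    	pods, err := cs.CoreV1().Pods("kube-system").List(context.TODO(),
    		metav1.ListOptions{LabelSelector: "k8s-app=konnectivity-agent"})
    	if err != nil {
    		panic(err)
    	}
    	for _, p := range pods.Items {
    		fmt.Printf("%s\t%s\t%s\n", p.Name, p.Spec.NodeName, p.Status.Phase)
    	}
    }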
Nov 26 08:57:09.355: INFO: rc: 1
Nov 26 08:57:09.355: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:10.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:11.367: INFO: rc: 1
Nov 26 08:57:11.367: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:12.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:13.339: INFO: rc: 1
Nov 26 08:57:13.339: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:14.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:15.304: INFO: rc: 1
Nov 26 08:57:15.304: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:16.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:17.378: INFO: rc: 1
Nov 26 08:57:17.378: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:18.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:19.328: INFO: rc: 1
Nov 26 08:57:19.328: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:20.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:21.436: INFO: rc: 1
Nov 26 08:57:21.436: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:22.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
------------------------------
Progress Report for Ginkgo Process #7
Automatically polling progress:
  [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 6m1.157s)
    test/e2e/network/loadbalancer.go:1422
    In [It] (Node Runtime: 6m0.008s)
      test/e2e/network/loadbalancer.go:1422
      At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 5m18.64s)
        test/e2e/network/loadbalancer.go:1466

Spec Goroutine
goroutine 1860 [select]
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc002d1c6e0?, 0x0?})
  test/e2e/framework/kubectl/builder.go:125
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...)
  test/e2e/framework/kubectl/builder.go:107
k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc000e79130?, 0x1?}, {0xc0006f7ad8?, 0x101010020?, 0x0?})
  test/e2e/framework/kubectl/builder.go:154
k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...)
  test/e2e/framework/pod/output/output.go:82
> k8s.io/kubernetes/test/e2e/network.glob..func20.6.3()
  test/e2e/network/loadbalancer.go:1468
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0x2?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
> k8s.io/kubernetes/test/e2e/network.glob..func20.6()
  test/e2e/network/loadbalancer.go:1467
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 08:57:23.429: INFO: rc: 1
Nov 26 08:57:23.429: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:24.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:25.301: INFO: rc: 1
Nov 26 08:57:25.301: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:26.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:27.424: INFO: rc: 1
Nov 26 08:57:27.424: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
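The RunHostCmd frame (test/e2e/framework/pod/output/output.go:82) in these reports also explains why every attempt prints the identical kubectl line: the helper rebuilds the same exec invocation on each poll, and the /bin/sh -x trace is what echoes "+ curl ..." into stderr before curl's own exit status. A paraphrased sketch of that wrapper, assuming the argument order shown in the logged invocation rather than quoting the framework source:

    // Package output mirrors test/e2e/framework/pod/output for illustration only.
    package output

    import (
    	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    )

    // runHostCmd wraps the shell command in
    //   kubectl exec <pod> -- /bin/sh -x -c <cmd>
    // which yields exactly the command lines repeated throughout this log.
    func runHostCmd(ns, name, cmd string) (string, error) {
    	return e2ekubectl.RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
    }

Because the wrapper returns kubectl's own exit status, a failure anywhere along apiserver -> konnectivity -> kubelet -> container surfaces as rc 1 here, while a failure of the curl inside the pod surfaces as curl's exit code.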
Nov 26 08:57:28.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:29.429: INFO: rc: 1
Nov 26 08:57:29.429: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:30.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:31.372: INFO: rc: 1
Nov 26 08:57:31.372: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:32.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:33.355: INFO: rc: 1
Nov 26 08:57:33.355: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:34.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:57:35.300: INFO: rc: 1
Nov 26 08:57:35.300: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 08:57:36.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:57:38.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:57:39.351: INFO: rc: 1 Nov 26 08:57:39.351: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:57:40.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:57:41.315: INFO: rc: 1 Nov 26 08:57:41.315: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:57:42.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' ------------------------------ Progress Report for Ginkgo Process #7 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 6m21.16s) test/e2e/network/loadbalancer.go:1422 In [It] (Node Runtime: 6m20.011s) test/e2e/network/loadbalancer.go:1422 At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 5m38.642s) test/e2e/network/loadbalancer.go:1466 Spec Goroutine goroutine 1860 [select] k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc000be7b80?, 0x0?}) test/e2e/framework/kubectl/builder.go:125 k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...) test/e2e/framework/kubectl/builder.go:107 k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc000e79130?, 0x1?}, {0xc0006f7ad8?, 0x101010020?, 0x0?}) test/e2e/framework/kubectl/builder.go:154 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...) 
test/e2e/framework/pod/output/output.go:82 > k8s.io/kubernetes/test/e2e/network.glob..func20.6.3() test/e2e/network/loadbalancer.go:1468 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.glob..func20.6() test/e2e/network/loadbalancer.go:1467 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 08:57:43.326: INFO: rc: 1 Nov 26 08:57:43.326: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:57:44.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:57:45.366: INFO: rc: 1 Nov 26 08:57:45.366: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 08:57:46.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:57:47.385: INFO: rc: 1 Nov 26 08:57:47.385: INFO: got err: error running 
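[Editor's note: the goroutine stack in the report above is the whole shape of this spec. The test body at test/e2e/network/loadbalancer.go:1467 drives a kubectl exec (RunHostCmd -> RunKubectl -> KubectlBuilder.Exec) from inside wait.PollImmediate, and every non-zero exit is swallowed and retried until the poll timeout, which is exactly the "retry until timeout" pattern in the log. The following is a minimal, self-contained sketch of that pattern; the interval, timeout, and the direct use of os/exec are illustrative assumptions, not the e2e framework's actual helpers.

// Minimal sketch of the poll-and-retry pattern shown in the stack trace.
// All durations and the direct os/exec invocation are illustrative
// assumptions; the real test goes through the e2e framework's kubectl helpers.
package main

import (
	"fmt"
	"os/exec"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	const (
		pollInterval = 2 * time.Second  // assumed; the log shows ~2s between attempts
		pollTimeout  = 15 * time.Minute // assumed overall budget for the step
	)
	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		// Equivalent of the logged command:
		// kubectl --namespace=esipp-3250 exec <pod> -- /bin/sh -x -c 'curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
		out, err := exec.Command("kubectl",
			"--namespace=esipp-3250",
			"exec", "pause-pod-deployment-7c665f9d5d-vbtlb", "--",
			"/bin/sh", "-x", "-c",
			"curl -q -s --connect-timeout 30 35.197.57.135:80/clientip",
		).CombinedOutput()
		if err != nil {
			// Mirrors the log: a failing attempt is recorded and retried,
			// never treated as fatal ("retry until timeout").
			fmt.Printf("got err: %v (output: %q), retry until timeout\n", err, out)
			return false, nil
		}
		fmt.Printf("clientip response: %s\n", out)
		return true, nil
	})
	if err != nil {
		fmt.Println("timed out waiting for a successful /clientip fetch:", err)
	}
}

Because the condition function returns (false, nil) on failure rather than an error, a broken proxy path or an unreachable apiserver simply burns the whole timeout, as the remainder of this log shows.]
------------------------------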
[Identical attempts elided: the retry loop continued from 08:57:43 through 08:58:35, each attempt returning rc: 1 with "Error from server: error dialing backend: No agent available". Two further progress reports were emitted during this window, at Spec Runtime 6m41.162s and 7m1.165s (Step Runtime 5m58.645s and 6m18.648s), for the same step "Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx"; their goroutine stacks are identical to the report above apart from argument pointer values.]
At 08:58:37 the failure mode changed: instead of failing to dial the node backend through the proxy ("No agent available"), kubectl could no longer connect to the apiserver at 34.83.96.51 at all.
Nov 26 08:58:36.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:58:37.035: INFO: rc: 1
Nov 26 08:58:37.035: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip:
Command stdout:
stderr:
The connection to the server 34.83.96.51 was refused - did you specify the right host or port?
error: exit status 1, retry until timeout
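------------------------------
[Editor's note: the URL being polled is worth keeping in mind while reading these failures. /clientip is the agnhost netexec handler that echoes the caller's source address as "ip:port", which is how ESIPP specs check that ExternalTrafficPolicy: Local preserves the client IP through the external load balancer. A rough sketch of that final verification step, assuming the "ip:port" response shape; the comparison target is a placeholder, not the test's actual expectation logic:

// Sketch of what a successful /clientip fetch is used for: agnhost's netexec
// handler replies with the caller's "ip:port", and the test inspects the IP
// half to confirm source-IP preservation. Illustrative only.
package main

import (
	"fmt"
	"io"
	"net"
	"net/http"
	"strings"
)

func main() {
	resp, err := http.Get("http://35.197.57.135:80/clientip") // LB IP from the log
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("reading body failed:", err)
		return
	}

	// netexec replies with "ip:port"; keep only the host part.
	host, _, err := net.SplitHostPort(strings.TrimSpace(string(body)))
	if err != nil {
		fmt.Println("unexpected /clientip payload:", string(body))
		return
	}

	// With ExternalTrafficPolicy: Local, the observed source IP should be the
	// real client's address rather than a node-SNATed one.
	fmt.Println("observed client IP:", host)
}

None of that logic was ever reached in this run, since no attempt got past the kubectl exec itself.]
------------------------------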
[Identical attempts elided: from 08:58:38 through 08:59:35 the loop kept retrying every ~2s, now failing with rc: 1 and "The connection to the server 34.83.96.51 was refused - did you specify the right host or port?". Three further progress reports were emitted, at Spec Runtime 7m21.168s, 7m41.17s, and 8m1.173s, for the same step (Step Runtime reaching 7m18.655s). In these later reports the spec goroutine 1860 is parked in [select] inside k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext (vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660), i.e. sleeping between poll attempts rather than blocked inside a kubectl exec; the remaining frames (poll, PollImmediateWithContext, PollImmediate, loadbalancer.go:1467, and the ginkgo runNode frames) match the report above.]
Nov 26 08:59:36.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 08:59:37.026: INFO: rc: 1
Nov 26 08:59:37.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip:
Command stdout:
stderr:
The connection to the server 34.83.96.51 was refused - did you specify the right host or port?
error: exit status 1, retry until timeout Nov 26 08:59:38.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:39.027: INFO: rc: 1 Nov 26 08:59:39.027: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:59:40.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:41.027: INFO: rc: 1 Nov 26 08:59:41.027: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:59:42.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:43.027: INFO: rc: 1 Nov 26 08:59:43.027: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout ------------------------------ Progress Report for Ginkgo Process #7 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 8m21.176s) test/e2e/network/loadbalancer.go:1422 In [It] (Node Runtime: 8m20.027s) test/e2e/network/loadbalancer.go:1422 At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 7m38.658s) test/e2e/network/loadbalancer.go:1466 Spec Goroutine goroutine 1860 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.glob..func20.6() test/e2e/network/loadbalancer.go:1467 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 08:59:44.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:45.027: INFO: rc: 1 Nov 26 08:59:45.027: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:59:46.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:47.026: INFO: rc: 1 Nov 26 08:59:47.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:59:48.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:49.026: INFO: rc: 1 Nov 26 08:59:49.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? 
error: exit status 1, retry until timeout Nov 26 08:59:50.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:51.026: INFO: rc: 1 Nov 26 08:59:51.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:59:52.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:53.025: INFO: rc: 1 Nov 26 08:59:53.025: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:59:54.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:55.023: INFO: rc: 1 Nov 26 08:59:55.023: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 08:59:56.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:57.026: INFO: rc: 1 Nov 26 08:59:57.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? 
error: exit status 1, retry until timeout Nov 26 08:59:58.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 08:59:59.026: INFO: rc: 1 Nov 26 08:59:59.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:00.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:01.023: INFO: rc: 1 Nov 26 09:00:01.023: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:02.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:03.024: INFO: rc: 1 Nov 26 09:00:03.024: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout ------------------------------ Progress Report for Ginkgo Process #7 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 8m41.179s) test/e2e/network/loadbalancer.go:1422 In [It] (Node Runtime: 8m40.03s) test/e2e/network/loadbalancer.go:1422 At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 7m58.661s) test/e2e/network/loadbalancer.go:1466 Spec Goroutine goroutine 1860 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.glob..func20.6() test/e2e/network/loadbalancer.go:1467 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 09:00:04.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:05.026: INFO: rc: 1 Nov 26 09:00:05.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:06.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:07.024: INFO: rc: 1 Nov 26 09:00:07.024: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:08.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:09.025: INFO: rc: 1 Nov 26 09:00:09.025: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? 
error: exit status 1, retry until timeout Nov 26 09:00:10.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:11.030: INFO: rc: 1 Nov 26 09:00:11.030: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:12.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:13.032: INFO: rc: 1 Nov 26 09:00:13.032: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:14.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:15.025: INFO: rc: 1 Nov 26 09:00:15.025: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:16.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:17.028: INFO: rc: 1 Nov 26 09:00:17.028: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? 
error: exit status 1, retry until timeout Nov 26 09:00:18.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:19.024: INFO: rc: 1 Nov 26 09:00:19.024: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:20.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:21.026: INFO: rc: 1 Nov 26 09:00:21.026: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:22.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:23.024: INFO: rc: 1 Nov 26 09:00:23.024: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout ------------------------------ Progress Report for Ginkgo Process #7 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 9m1.181s) test/e2e/network/loadbalancer.go:1422 In [It] (Node Runtime: 9m0.032s) test/e2e/network/loadbalancer.go:1422 At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 8m18.663s) test/e2e/network/loadbalancer.go:1466 Spec Goroutine goroutine 1860 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.glob..func20.6() test/e2e/network/loadbalancer.go:1467 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 09:00:24.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:25.025: INFO: rc: 1 Nov 26 09:00:25.025: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:26.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:27.025: INFO: rc: 1 Nov 26 09:00:27.025: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? error: exit status 1, retry until timeout Nov 26 09:00:28.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:00:29.029: INFO: rc: 1 Nov 26 09:00:29.029: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip: Command stdout: stderr: The connection to the server 34.83.96.51 was refused - did you specify the right host or port? 
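[editor's note] The "retry until timeout" cadence and the wait.PollImmediate frames in the stack above reflect a poll-and-exec loop. A minimal, self-contained sketch of that pattern follows; the 2s interval, 15m timeout, and the literal server/pod/LB values are illustrative assumptions copied from this log, not the framework's actual parameters.

package main

import (
	"fmt"
	"os/exec"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	args := []string{
		"--server=https://34.83.96.51",
		"--kubeconfig=/workspace/.kube/config",
		"--namespace=esipp-3250",
		"exec", "pause-pod-deployment-7c665f9d5d-vbtlb", "--",
		"/bin/sh", "-x", "-c", "curl -q -s --connect-timeout 30 35.197.57.135:80/clientip",
	}
	// Poll until the exec succeeds or the (assumed) timeout elapses. Any
	// error is treated as transient and retried, which is why the log above
	// keeps printing "retry until timeout" through an apiserver outage.
	err := wait.PollImmediate(2*time.Second, 15*time.Minute, func() (bool, error) {
		out, execErr := exec.Command("kubectl", args...).CombinedOutput()
		if execErr != nil {
			fmt.Printf("got err: %v, retry until timeout\n", execErr)
			return false, nil // keep polling on transient failures
		}
		fmt.Printf("stdout: %q\n", out)
		return true, nil // condition met, stop polling
	})
	if err != nil {
		fmt.Println("timed out waiting for the condition")
	}
}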
error: exit status 1, retry until timeout
Nov 26 09:00:30.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 09:00:33.980: INFO: rc: 1
Nov 26 09:00:33.980: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip:
Command stdout:
stderr: Error from server: error dialing backend: No agent available
error: exit status 1, retry until timeout
[the failure mode changes here: the apiserver now answers, but exec requests fail with "error dialing backend: No agent available"; the same attempt repeats every ~2s from 09:00:34 through 09:00:47 with that stderr, and a progress report at Spec Runtime 9m21.183s shows the goroutine now blocked in KubectlBuilder.ExecWithFullOutput (test/e2e/framework/kubectl/builder.go:125) via RunHostCmd (test/e2e/framework/pod/output/output.go:82) from loadbalancer.go:1468]
Nov 26 09:00:48.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 09:00:49.437: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n"
Nov 26 09:00:49.437: INFO: stdout: "10.64.3.217:48176"
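[editor's note] Once an exec tunnel is available again, the curl body is an "ip:port" pair, presumably echoed by an agnhost-style /clientip handler that reports the connection's source address, and the loop keeps polling until the observed source IP matches what the test expects. A minimal sketch of that comparison follows; checkClientIP and the expected-IP value are illustrative assumptions, not the e2e framework's code.

package main

import (
	"fmt"
	"net"
)

// checkClientIP parses an "ip:port" body such as "10.64.3.217:48176" (the
// responses seen in this log) and reports whether the source IP matches the
// expectation. expectedIP is a placeholder; the real test derives it from
// the cluster topology under test.
func checkClientIP(body, expectedIP string) (bool, error) {
	host, _, err := net.SplitHostPort(body)
	if err != nil {
		return false, fmt.Errorf("unexpected /clientip body %q: %w", body, err)
	}
	return host == expectedIP, nil
}

func main() {
	ok, err := checkClientIP("10.64.3.217:48176", "10.64.3.217")
	fmt.Println(ok, err) // prints: true <nil>
}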
Nov 26 09:00:50.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 09:00:51.447: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n"
Nov 26 09:00:51.447: INFO: stdout: "10.64.3.217:48178"
[every subsequent attempt succeeds: the same kubectl exec curl runs every ~2s, each response from 09:00:53 through 09:01:41 returning stdout "10.64.3.217:<ephemeral source port>" (ports 48180 through 49284); progress reports at Spec Runtime 9m41.185s and 10m1.188s repeat the same ExecWithFullOutput/RunHostCmd goroutine stack; a final attempt starts at 09:01:42.916 before the report below]
------------------------------
Progress Report for Ginkgo Process #7
Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 10m21.19s) test/e2e/network/loadbalancer.go:1422
In [It] (Node Runtime: 10m20.041s) test/e2e/network/loadbalancer.go:1422
At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 9m38.673s) test/e2e/network/loadbalancer.go:1466
Spec Goroutine
goroutine 1860 [select]
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc000aeab00?, 0x0?}) test/e2e/framework/kubectl/builder.go:125
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...) test/e2e/framework/kubectl/builder.go:107
k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc000e79130?, 0x1?}, {0xc0006f7ad8?, 0x101010001?, 0x0?}) test/e2e/framework/kubectl/builder.go:154
k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...) test/e2e/framework/pod/output/output.go:82
> k8s.io/kubernetes/test/e2e/network.glob..func20.6.3() test/e2e/network/loadbalancer.go:1468
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.glob..func20.6() test/e2e/network/loadbalancer.go:1467 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 09:01:43.448: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:01:43.448: INFO: stdout: "10.64.3.217:49286" Nov 26 09:01:44.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:01:45.431: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:01:45.431: INFO: stdout: "10.64.3.217:49290" Nov 26 09:01:46.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:01:47.430: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:01:47.430: INFO: stdout: "10.64.3.217:43164" Nov 26 09:01:48.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:01:49.438: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:01:49.438: INFO: stdout: "10.64.3.217:43166" Nov 26 09:01:50.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:01:51.456: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:01:51.456: INFO: stdout: "10.64.3.217:43168" Nov 26 09:01:52.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 
26 09:01:53.419: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:01:53.419: INFO: stdout: "10.64.3.217:43170" Nov 26 09:01:54.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:01:55.441: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:01:55.441: INFO: stdout: "10.64.3.217:43172" Nov 26 09:01:56.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:01:57.432: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:01:57.432: INFO: stdout: "10.64.3.217:54470" Nov 26 09:01:58.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:01:59.430: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:01:59.430: INFO: stdout: "10.64.3.217:54472" Nov 26 09:02:00.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' Nov 26 09:02:01.443: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n" Nov 26 09:02:01.443: INFO: stdout: "10.64.3.217:54474" Nov 26 09:02:02.917: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip' ------------------------------ Progress Report for Ginkgo Process #7 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 10m41.193s) test/e2e/network/loadbalancer.go:1422 In [It] (Node Runtime: 10m40.045s) test/e2e/network/loadbalancer.go:1422 At [By Step] Hitting external lb 35.197.57.135 from pod pause-pod-deployment-7c665f9d5d-vbtlb on node bootstrap-e2e-minion-group-s7dx (Step Runtime: 9m58.676s) test/e2e/network/loadbalancer.go:1466 Spec Goroutine goroutine 1860 [select] k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc000d174a0?, 0x0?}) test/e2e/framework/kubectl/builder.go:125 k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...) test/e2e/framework/kubectl/builder.go:107 k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc000e79130?, 0x1?}, {0xc0006f7ad8?, 0x101010001?, 0x0?}) test/e2e/framework/kubectl/builder.go:154 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...) 
test/e2e/framework/pod/output/output.go:82
> k8s.io/kubernetes/test/e2e/network.glob..func20.6.3()
test/e2e/network/loadbalancer.go:1468
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0x2?)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0023c0288, 0x2fdb16a?)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xb0?, 0x2fd9d05?, 0x28?)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0xc0006f7d00?, 0x262a967?)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00025b280?, 0x78?, 0x0?)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
> k8s.io/kubernetes/test/e2e/network.glob..func20.6()
test/e2e/network/loadbalancer.go:1467
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0028edc80})
vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 09:02:03.434: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n"
Nov 26 09:02:03.434: INFO: stdout: "10.64.3.217:54476"
Nov 26 09:02:04.916: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 09:02:05.435: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n"
Nov 26 09:02:05.435: INFO: stdout: "10.64.3.217:54478"
Nov 26 09:02:05.435: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 exec pause-pod-deployment-7c665f9d5d-vbtlb -- /bin/sh -x -c curl -q -s --connect-timeout 30 35.197.57.135:80/clientip'
Nov 26 09:02:05.943: INFO: stderr: "+ curl -q -s --connect-timeout 30 35.197.57.135:80/clientip\n"
Nov 26 09:02:05.943: INFO: stdout: "10.64.3.217:54480"
Nov 26 09:02:05.943: FAIL: Source IP not preserved from pause-pod-deployment-7c665f9d5d-vbtlb, expected '10.64.3.196' got '10.64.3.217'

Full Stack Trace
k8s.io/kubernetes/test/e2e/network.glob..func20.6()
	test/e2e/network/loadbalancer.go:1476 +0xabd

Nov 26 09:02:05.943: INFO: Deleting deployment
Nov 26 09:02:06.225: INFO: Waiting up to 15m0s for service "external-local-pods" to have no LoadBalancer
[AfterEach] [sig-network] LoadBalancers ESIPP [Slow]
test/e2e/framework/node/init/init.go:32
Nov 26 09:02:16.490: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[AfterEach] [sig-network] LoadBalancers ESIPP [Slow]
test/e2e/network/loadbalancer.go:1260
Nov 26 09:02:16.583: INFO: Output of kubectl describe svc:
Nov 26 09:02:16.584: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=esipp-3250 describe svc --namespace=esipp-3250'
Nov 26 09:02:16.906: INFO: stderr: ""
Nov 26 09:02:16.906: INFO: stdout: "Name: external-local-pods\nNamespace: esipp-3250\nLabels: testid=external-local-pods-9f48f68f-2725-4b7f-9cbd-d40eb5d0d564\nAnnotations: <none>\nSelector: testid=external-local-pods-9f48f68f-2725-4b7f-9cbd-d40eb5d0d564\nType: ClusterIP\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.220.73\nIPs: 10.0.220.73\nPort: <unset> 80/TCP\nTargetPort: 80/TCP\nEndpoints: 10.64.2.228:80\nSession Affinity: None\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal EnsuringLoadBalancer 10m service-controller Ensuring load balancer\n Normal EnsuredLoadBalancer 10m service-controller Ensured load balancer\n Normal EnsuringLoadBalancer 6m14s service-controller Ensuring load balancer\n Normal EnsuredLoadBalancer 6m10s service-controller Ensured load balancer\n"
Nov 26 09:02:16.906: INFO: Name:  external-local-pods
Namespace:         esipp-3250
Labels:            testid=external-local-pods-9f48f68f-2725-4b7f-9cbd-d40eb5d0d564
Annotations:       <none>
Selector:          testid=external-local-pods-9f48f68f-2725-4b7f-9cbd-d40eb5d0d564
Type:              ClusterIP
IP Family Policy:  SingleStack
IP Families:       IPv4
IP:                10.0.220.73
IPs:               10.0.220.73
Port:              <unset>  80/TCP
TargetPort:        80/TCP
Endpoints:         10.64.2.228:80
Session Affinity:  None
Events:
  Type    Reason                Age    From                Message
  ----    ------                ----   ----                -------
  Normal  EnsuringLoadBalancer  10m    service-controller  Ensuring load balancer
  Normal  EnsuredLoadBalancer   10m    service-controller  Ensured load balancer
  Normal  EnsuringLoadBalancer  6m14s  service-controller  Ensuring load balancer
  Normal  EnsuredLoadBalancer   6m10s  service-controller  Ensured load balancer
[DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow]
test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow]
dump namespaces | framework.go:196
STEP: dump namespace information after failure 11/26/22 09:02:16.907
STEP: Collecting events from namespace "esipp-3250". 11/26/22 09:02:16.907
STEP: Found 23 events.
11/26/22 09:02:16.952 Nov 26 09:02:16.952: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for external-local-pods-7f2ql: { } Scheduled: Successfully assigned esipp-3250/external-local-pods-7f2ql to bootstrap-e2e-minion-group-lz41 Nov 26 09:02:16.952: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for pause-pod-deployment-7c665f9d5d-vbtlb: { } Scheduled: Successfully assigned esipp-3250/pause-pod-deployment-7c665f9d5d-vbtlb to bootstrap-e2e-minion-group-s7dx Nov 26 09:02:16.952: INFO: At 2022-11-26 08:51:23 +0000 UTC - event for external-local-pods: {service-controller } EnsuringLoadBalancer: Ensuring load balancer Nov 26 09:02:16.952: INFO: At 2022-11-26 08:51:57 +0000 UTC - event for external-local-pods: {service-controller } EnsuredLoadBalancer: Ensured load balancer Nov 26 09:02:16.952: INFO: At 2022-11-26 08:51:57 +0000 UTC - event for external-local-pods: {replication-controller } SuccessfulCreate: Created pod: external-local-pods-7f2ql Nov 26 09:02:16.952: INFO: At 2022-11-26 08:51:58 +0000 UTC - event for external-local-pods-7f2ql: {kubelet bootstrap-e2e-minion-group-lz41} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 09:02:16.952: INFO: At 2022-11-26 08:51:58 +0000 UTC - event for external-local-pods-7f2ql: {kubelet bootstrap-e2e-minion-group-lz41} Created: Created container netexec Nov 26 09:02:16.952: INFO: At 2022-11-26 08:51:58 +0000 UTC - event for external-local-pods-7f2ql: {kubelet bootstrap-e2e-minion-group-lz41} Started: Started container netexec Nov 26 09:02:16.952: INFO: At 2022-11-26 08:51:59 +0000 UTC - event for external-local-pods-7f2ql: {kubelet bootstrap-e2e-minion-group-lz41} Killing: Stopping container netexec Nov 26 09:02:16.952: INFO: At 2022-11-26 08:52:00 +0000 UTC - event for external-local-pods-7f2ql: {kubelet bootstrap-e2e-minion-group-lz41} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
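Editor's note: the EnsuringLoadBalancer/EnsuredLoadBalancer events above belong to the "external-local" Service the spec provisions. Source-IP preservation in the ESIPP tests rests on the Service requesting externalTrafficPolicy: Local, which makes kube-proxy deliver load-balancer traffic only to endpoints on the receiving node and skip SNAT, so the backend sees the real client address. (The describe output earlier shows Type: ClusterIP only because the AfterEach had already begun stripping the LoadBalancer.) A hedged sketch of such a Service in client-go types, with values echoed from this log rather than taken from the suite's source:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	svc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "external-local-pods", Namespace: "esipp-3250"},
		Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeLoadBalancer,
			// Local keeps the client address intact: traffic is delivered only to
			// node-local endpoints, so kube-proxy does not SNAT it. Newer k8s.io/api
			// releases name this constant ServiceExternalTrafficPolicyLocal.
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
			Selector:              map[string]string{"testid": "external-local-pods-9f48f68f-2725-4b7f-9cbd-d40eb5d0d564"},
			Ports:                 []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(80)}},
		},
	}
	fmt.Println(svc.Name, svc.Spec.ExternalTrafficPolicy)
}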
Nov 26 09:02:16.952: INFO: At 2022-11-26 08:52:00 +0000 UTC - event for pause-pod-deployment: {deployment-controller } ScalingReplicaSet: Scaled up replica set pause-pod-deployment-7c665f9d5d to 1 Nov 26 09:02:16.952: INFO: At 2022-11-26 08:52:00 +0000 UTC - event for pause-pod-deployment-7c665f9d5d: {replicaset-controller } SuccessfulCreate: Created pod: pause-pod-deployment-7c665f9d5d-vbtlb Nov 26 09:02:16.952: INFO: At 2022-11-26 08:52:01 +0000 UTC - event for pause-pod-deployment-7c665f9d5d-vbtlb: {kubelet bootstrap-e2e-minion-group-s7dx} FailedMount: MountVolume.SetUp failed for volume "kube-api-access-hrgrk" : failed to sync configmap cache: timed out waiting for the condition Nov 26 09:02:16.952: INFO: At 2022-11-26 08:52:02 +0000 UTC - event for pause-pod-deployment-7c665f9d5d-vbtlb: {kubelet bootstrap-e2e-minion-group-s7dx} Created: Created container agnhost-pause Nov 26 09:02:16.952: INFO: At 2022-11-26 08:52:02 +0000 UTC - event for pause-pod-deployment-7c665f9d5d-vbtlb: {kubelet bootstrap-e2e-minion-group-s7dx} Started: Started container agnhost-pause Nov 26 09:02:16.952: INFO: At 2022-11-26 08:52:02 +0000 UTC - event for pause-pod-deployment-7c665f9d5d-vbtlb: {kubelet bootstrap-e2e-minion-group-s7dx} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 09:02:16.952: INFO: At 2022-11-26 08:52:03 +0000 UTC - event for external-local-pods-7f2ql: {kubelet bootstrap-e2e-minion-group-lz41} BackOff: Back-off restarting failed container netexec in pod external-local-pods-7f2ql_esipp-3250(3678305d-0b58-4887-8d59-ca7d166c40df) Nov 26 09:02:16.952: INFO: At 2022-11-26 08:53:33 +0000 UTC - event for pause-pod-deployment-7c665f9d5d-vbtlb: {kubelet bootstrap-e2e-minion-group-s7dx} Killing: Stopping container agnhost-pause Nov 26 09:02:16.952: INFO: At 2022-11-26 08:53:34 +0000 UTC - event for pause-pod-deployment-7c665f9d5d-vbtlb: {kubelet bootstrap-e2e-minion-group-s7dx} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
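Editor's note, read against the FAIL above: the Killing/SandboxChanged/BackOff events for the pause pod show it was restarted around 08:53, and a recreated sandbox typically receives a fresh pod IP. That would leave the spec comparing its recorded 10.64.3.196 against the pod's current address, consistent with the observed 10.64.3.217. One way to check the live pod IP when triaging such a failure (an editor's sketch, not something the suite runs):

package main

import (
	"fmt"
	"os/exec"
)

// currentPodIP shells out to kubectl and returns the pod's present .status.podIP;
// comparing it with the IP captured at test start exposes a mid-test restart.
func currentPodIP(ns, pod string) (string, error) {
	out, err := exec.Command("kubectl", "-n", ns, "get", "pod", pod,
		"-o", "jsonpath={.status.podIP}").Output()
	return string(out), err
}

func main() {
	ip, err := currentPodIP("esipp-3250", "pause-pod-deployment-7c665f9d5d-vbtlb")
	if err != nil {
		fmt.Println("kubectl failed:", err)
		return
	}
	fmt.Println("current podIP:", ip)
}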
Nov 26 09:02:16.952: INFO: At 2022-11-26 08:53:37 +0000 UTC - event for external-local-pods-7f2ql: {kubelet bootstrap-e2e-minion-group-lz41} Unhealthy: Readiness probe failed: Get "http://10.64.2.208:80/hostName": dial tcp 10.64.2.208:80: connect: connection refused Nov 26 09:02:16.952: INFO: At 2022-11-26 08:53:37 +0000 UTC - event for pause-pod-deployment-7c665f9d5d-vbtlb: {kubelet bootstrap-e2e-minion-group-s7dx} BackOff: Back-off restarting failed container agnhost-pause in pod pause-pod-deployment-7c665f9d5d-vbtlb_esipp-3250(4792d511-1f12-4a5f-9c87-2dfb174c74e9) Nov 26 09:02:16.952: INFO: At 2022-11-26 08:56:02 +0000 UTC - event for external-local-pods: {service-controller } EnsuringLoadBalancer: Ensuring load balancer Nov 26 09:02:16.952: INFO: At 2022-11-26 08:56:06 +0000 UTC - event for external-local-pods: {service-controller } EnsuredLoadBalancer: Ensured load balancer Nov 26 09:02:16.994: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 09:02:16.994: INFO: external-local-pods-7f2ql bootstrap-e2e-minion-group-lz41 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:51:57 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:57:40 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:57:40 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:51:57 +0000 UTC }] Nov 26 09:02:16.994: INFO: pause-pod-deployment-7c665f9d5d-vbtlb bootstrap-e2e-minion-group-s7dx Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:52:00 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 09:02:15 +0000 UTC ContainersNotReady containers with unready status: [agnhost-pause]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 09:02:15 +0000 UTC ContainersNotReady containers with unready status: [agnhost-pause]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:52:00 +0000 UTC }] Nov 26 09:02:16.994: INFO: Nov 26 09:02:17.195: INFO: Logging node info for node bootstrap-e2e-master Nov 26 09:02:17.237: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master ba8d154d-f7d1-4d02-b950-a084eb625244 14441 0 2022-11-26 08:28:16 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 09:00:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.83.96.51,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bf41ff823483c389ad7dfd0c1ce16b06,SystemUUID:bf41ff82-3483-c389-ad7d-fd0c1ce16b06,BootID:5db3fc62-e7bb-4715-a04e-2bdf5328dbc8,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 09:02:17.238: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 09:02:17.282: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 09:02:17.345: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.345: INFO: Container kube-apiserver ready: true, restart count 5 Nov 26 09:02:17.345: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-26 08:27:49 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.345: INFO: Container kube-addon-manager ready: true, restart count 6 Nov 26 09:02:17.345: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-26 08:27:49 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.345: INFO: Container l7-lb-controller ready: false, restart count 8 Nov 26 09:02:17.345: INFO: 
metadata-proxy-v0.1-xx7th started at 2022-11-26 08:28:23 +0000 UTC (0+2 container statuses recorded) Nov 26 09:02:17.345: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 09:02:17.345: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 09:02:17.345: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.345: INFO: Container etcd-container ready: true, restart count 3 Nov 26 09:02:17.345: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.345: INFO: Container etcd-container ready: true, restart count 3 Nov 26 09:02:17.345: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.345: INFO: Container konnectivity-server-container ready: true, restart count 3 Nov 26 09:02:17.345: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.345: INFO: Container kube-controller-manager ready: true, restart count 11 Nov 26 09:02:17.345: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-26 08:27:32 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.345: INFO: Container kube-scheduler ready: false, restart count 6 Nov 26 09:02:17.552: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 09:02:17.552: INFO: Logging node info for node bootstrap-e2e-minion-group-327c Nov 26 09:02:17.596: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-327c 2792e2db-d60b-4a1a-b593-202ac7a81c7e 14512 0 2022-11-26 08:28:14 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-327c kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-327c topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-6566":"bootstrap-e2e-minion-group-327c","csi-hostpath-provisioning-3311":"bootstrap-e2e-minion-group-327c","csi-mock-csi-mock-volumes-4099":"bootstrap-e2e-minion-group-327c","csi-mock-csi-mock-volumes-5232":"bootstrap-e2e-minion-group-327c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } 
{kube-controller-manager Update v1 2022-11-26 08:56:02 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 09:00:38 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 09:01:30 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-327c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 09:00:38 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 09:00:38 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 09:00:38 +0000 
UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 09:00:38 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 09:00:38 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 09:00:38 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 09:00:38 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:24 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 09:00:35 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 09:00:35 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 09:00:35 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 09:00:35 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.233.8,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:d5273c9e67ce602a784d93fca00549e5,SystemUUID:d5273c9e-67ce-602a-784d-93fca00549e5,BootID:6b605594-03f1-4a39-9bc1-bb9fc688da43,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692,DevicePath:,},},Config:nil,},} Nov 26 09:02:17.597: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-327c Nov 26 09:02:17.640: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-327c Nov 26 09:02:17.751: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:52:05 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 09:02:17.751: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 09:02:17.751: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 09:02:17.751: INFO: Container hostpath ready: true, restart count 0 Nov 26 09:02:17.751: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 09:02:17.751: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 09:02:17.751: INFO: l7-default-backend-8549d69d99-b5jrs started at 2022-11-26 08:28:24 +0000 
UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 09:02:17.751: INFO: konnectivity-agent-mmmgd started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container konnectivity-agent ready: true, restart count 9 Nov 26 09:02:17.751: INFO: csi-mockplugin-0 started at 2022-11-26 08:37:21 +0000 UTC (0+4 container statuses recorded) Nov 26 09:02:17.751: INFO: Container busybox ready: false, restart count 6 Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container driver-registrar ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container mock ready: false, restart count 7 Nov 26 09:02:17.751: INFO: metadata-proxy-v0.1-w74pw started at 2022-11-26 08:28:15 +0000 UTC (0+2 container statuses recorded) Nov 26 09:02:17.751: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 09:02:17.751: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 09:02:17.751: INFO: csi-mockplugin-0 started at 2022-11-26 08:48:49 +0000 UTC (0+3 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 09:02:17.751: INFO: Container driver-registrar ready: true, restart count 4 Nov 26 09:02:17.751: INFO: Container mock ready: true, restart count 4 Nov 26 09:02:17.751: INFO: netserver-0 started at 2022-11-26 08:32:54 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container webserver ready: true, restart count 9 Nov 26 09:02:17.751: INFO: netserver-0 started at 2022-11-26 08:57:15 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container webserver ready: true, restart count 1 Nov 26 09:02:17.751: INFO: kube-dns-autoscaler-5f6455f985-tnj96 started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container autoscaler ready: false, restart count 9 Nov 26 09:02:17.751: INFO: pvc-volume-tester-6957f started at 2022-11-26 08:33:17 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container volume-tester ready: false, restart count 0 Nov 26 09:02:17.751: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:37:11 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-attacher ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container csi-resizer ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container csi-snapshotter ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container hostpath ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container liveness-probe ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container node-driver-registrar ready: false, restart count 7 Nov 26 09:02:17.751: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:52:20 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container hostpath ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 09:02:17.751: 
INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 09:02:17.751: INFO: kube-proxy-bootstrap-e2e-minion-group-327c started at 2022-11-26 08:28:14 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container kube-proxy ready: false, restart count 9 Nov 26 09:02:17.751: INFO: coredns-6d97d5ddb-cz84m started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container coredns ready: false, restart count 10 Nov 26 09:02:17.751: INFO: coredns-6d97d5ddb-q6tzt started at 2022-11-26 08:28:28 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container coredns ready: false, restart count 10 Nov 26 09:02:17.751: INFO: pod-43d27f70-d941-4117-b96d-c563fc43297f started at 2022-11-26 08:39:32 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container write-pod ready: false, restart count 0 Nov 26 09:02:17.751: INFO: volume-snapshot-controller-0 started at 2022-11-26 08:28:24 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container volume-snapshot-controller ready: false, restart count 11 Nov 26 09:02:17.751: INFO: pod-d4f0fe4d-227a-40e8-929b-a033a1faef35 started at 2022-11-26 08:39:35 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container write-pod ready: false, restart count 0 Nov 26 09:02:17.751: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:48:35 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 09:02:17.751: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 09:02:17.751: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 09:02:17.751: INFO: Container hostpath ready: false, restart count 6 Nov 26 09:02:17.751: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 09:02:17.751: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 09:02:17.751: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:47:40 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container hostpath ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 09:02:17.751: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 09:02:17.751: INFO: csi-mockplugin-0 started at 2022-11-26 08:30:43 +0000 UTC (0+3 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: false, restart count 9 Nov 26 09:02:17.751: INFO: Container driver-registrar ready: false, restart count 9 Nov 26 09:02:17.751: INFO: Container mock ready: false, restart count 9 Nov 26 09:02:17.751: INFO: csi-mockplugin-0 started at 2022-11-26 08:34:51 +0000 UTC (0+4 container statuses recorded) Nov 26 09:02:17.751: INFO: Container busybox ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: false, restart count 7 Nov 26 09:02:17.751: INFO: Container driver-registrar ready: false, restart count 9 Nov 26 09:02:17.751: INFO: Container mock ready: false, restart count 9 Nov 26 
09:02:17.751: INFO: csi-mockplugin-0 started at 2022-11-26 08:31:07 +0000 UTC (0+4 container statuses recorded) Nov 26 09:02:17.751: INFO: Container busybox ready: false, restart count 9 Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: true, restart count 9 Nov 26 09:02:17.751: INFO: Container driver-registrar ready: true, restart count 9 Nov 26 09:02:17.751: INFO: Container mock ready: true, restart count 9 Nov 26 09:02:17.751: INFO: hostexec-bootstrap-e2e-minion-group-327c-6xj2s started at 2022-11-26 08:39:23 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container agnhost-container ready: false, restart count 5 Nov 26 09:02:17.751: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:48:02 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-attacher ready: true, restart count 5 Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: true, restart count 5 Nov 26 09:02:17.751: INFO: Container csi-resizer ready: true, restart count 5 Nov 26 09:02:17.751: INFO: Container csi-snapshotter ready: true, restart count 5 Nov 26 09:02:17.751: INFO: Container hostpath ready: true, restart count 5 Nov 26 09:02:17.751: INFO: Container liveness-probe ready: true, restart count 5 Nov 26 09:02:17.751: INFO: Container node-driver-registrar ready: true, restart count 5 Nov 26 09:02:17.751: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 08:30:43 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-attacher ready: true, restart count 8 Nov 26 09:02:17.751: INFO: csi-mockplugin-0 started at 2022-11-26 08:30:43 +0000 UTC (0+3 container statuses recorded) Nov 26 09:02:17.751: INFO: Container csi-provisioner ready: true, restart count 9 Nov 26 09:02:17.751: INFO: Container driver-registrar ready: false, restart count 9 Nov 26 09:02:17.751: INFO: Container mock ready: true, restart count 9 Nov 26 09:02:18.010: INFO: Latency metrics for node bootstrap-e2e-minion-group-327c Nov 26 09:02:18.010: INFO: Logging node info for node bootstrap-e2e-minion-group-lz41 Nov 26 09:02:18.055: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-lz41 2e433af5-3311-4285-97fa-cde6a9a5b261 14432 0 2022-11-26 08:28:20 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-lz41 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-lz41 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-8230":"bootstrap-e2e-minion-group-lz41","csi-hostpath-provisioning-9560":"bootstrap-e2e-minion-group-lz41"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:20 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:48:07 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-26 09:00:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status} {node-problem-detector Update v1 2022-11-26 09:00:37 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-lz41,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 
DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 09:00:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 09:00:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 09:00:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 09:00:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 09:00:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 09:00:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 09:00:37 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 09:00:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 09:00:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 09:00:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 09:00:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.83.179.153,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:aa52222b7f092a36e5e364be7d47d224,SystemUUID:aa52222b-7f09-2a36-e5e3-64be7d47d224,BootID:74a831b5-c273-4958-8b03-2d43808117f5,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9 kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},},Config:nil,},} Nov 26 09:02:18.055: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-lz41 Nov 26 09:02:18.099: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-lz41 Nov 26 09:02:18.155: INFO: pod-6d904d9d-47d9-4612-b9ba-75195733a8e3 started at 2022-11-26 08:32:05 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.155: INFO: Container write-pod ready: false, restart count 0 Nov 26 09:02:18.155: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:47:44 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:18.155: INFO: Container csi-attacher ready: true, restart count 5 Nov 26 09:02:18.155: INFO: Container csi-provisioner ready: true, restart count 5 Nov 26 09:02:18.155: INFO: Container csi-resizer ready: true, restart count 5 Nov 26 09:02:18.155: INFO: Container csi-snapshotter ready: true, restart count 5 Nov 26 09:02:18.155: INFO: Container hostpath ready: true, restart count 5 Nov 26 09:02:18.155: INFO: Container liveness-probe ready: true, restart count 5 Nov 26 09:02:18.155: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 09:02:18.155: INFO: konnectivity-agent-8v4r5 started at 2022-11-26 08:28:34 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.155: INFO: Container konnectivity-agent ready: true, restart count 9 Nov 26 09:02:18.155: INFO: netserver-1 started at 2022-11-26 08:32:54 +0000 UTC (0+1 container 
statuses recorded) Nov 26 09:02:18.155: INFO: Container webserver ready: true, restart count 10 Nov 26 09:02:18.155: INFO: pod-secrets-237c2934-d670-464d-9497-3fea99e7afae started at 2022-11-26 08:35:10 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.155: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 09:02:18.155: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:47:14 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:18.155: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 09:02:18.155: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 09:02:18.155: INFO: Container csi-resizer ready: true, restart count 4 Nov 26 09:02:18.155: INFO: Container csi-snapshotter ready: true, restart count 4 Nov 26 09:02:18.155: INFO: Container hostpath ready: true, restart count 4 Nov 26 09:02:18.155: INFO: Container liveness-probe ready: true, restart count 4 Nov 26 09:02:18.155: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 09:02:18.155: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:31:57 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:18.155: INFO: Container csi-attacher ready: false, restart count 8 Nov 26 09:02:18.155: INFO: Container csi-provisioner ready: false, restart count 8 Nov 26 09:02:18.155: INFO: Container csi-resizer ready: false, restart count 8 Nov 26 09:02:18.155: INFO: Container csi-snapshotter ready: false, restart count 8 Nov 26 09:02:18.155: INFO: Container hostpath ready: false, restart count 8 Nov 26 09:02:18.155: INFO: Container liveness-probe ready: false, restart count 8 Nov 26 09:02:18.155: INFO: Container node-driver-registrar ready: false, restart count 8 Nov 26 09:02:18.155: INFO: external-local-pods-7f2ql started at 2022-11-26 08:51:57 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.155: INFO: Container netexec ready: true, restart count 5 Nov 26 09:02:18.155: INFO: pod-configmaps-e34354ab-e827-4e6b-a435-5ad3a6953665 started at 2022-11-26 08:50:10 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.155: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 09:02:18.155: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:52:21 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:18.155: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 09:02:18.155: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 09:02:18.155: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 09:02:18.155: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 09:02:18.155: INFO: Container hostpath ready: false, restart count 6 Nov 26 09:02:18.155: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 09:02:18.155: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 09:02:18.155: INFO: metadata-proxy-v0.1-gtpkq started at 2022-11-26 08:28:22 +0000 UTC (0+2 container statuses recorded) Nov 26 09:02:18.155: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 09:02:18.155: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 09:02:18.155: INFO: metrics-server-v0.5.2-867b8754b9-q5chn started at 2022-11-26 08:28:48 +0000 UTC (0+2 container statuses recorded) Nov 26 09:02:18.155: INFO: Container metrics-server ready: false, restart count 10 Nov 26 09:02:18.155: INFO: Container metrics-server-nanny ready: false, restart count 10 Nov 26 09:02:18.155: INFO: back-off-cap started at 2022-11-26 08:33:28 +0000 UTC (0+1 
container statuses recorded) Nov 26 09:02:18.155: INFO: Container back-off-cap ready: false, restart count 10 Nov 26 09:02:18.155: INFO: pod-back-off-image started at 2022-11-26 08:36:35 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.155: INFO: Container back-off ready: false, restart count 9 Nov 26 09:02:18.155: INFO: kube-proxy-bootstrap-e2e-minion-group-lz41 started at 2022-11-26 08:28:20 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.155: INFO: Container kube-proxy ready: false, restart count 9 Nov 26 09:02:18.155: INFO: pod-secrets-532ad663-62f4-49e0-b24f-023151be8cd0 started at 2022-11-26 08:48:20 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.155: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 09:02:18.155: INFO: netserver-1 started at 2022-11-26 08:57:15 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.155: INFO: Container webserver ready: false, restart count 3 Nov 26 09:02:18.422: INFO: Latency metrics for node bootstrap-e2e-minion-group-lz41 Nov 26 09:02:18.422: INFO: Logging node info for node bootstrap-e2e-minion-group-s7dx Nov 26 09:02:18.465: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-s7dx 94f7d8ed-1dd6-4a3f-9454-b3d54cd8c750 14518 0 2022-11-26 08:28:22 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-s7dx kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-s7dx topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-5957":"bootstrap-e2e-minion-group-s7dx","csi-hostpath-volumemode-3424":"bootstrap-e2e-minion-group-s7dx","csi-mock-csi-mock-volumes-3757":"csi-mock-csi-mock-volumes-3757","csi-mock-csi-mock-volumes-9990":"bootstrap-e2e-minion-group-s7dx"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:23 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:52:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 09:00:39 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 09:01:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-s7dx,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not 
read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 09:00:39 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:57:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:57:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:57:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:57:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.127.51.136,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-s7dx.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7318864e323f36191a9cc6aee4e5582d,SystemUUID:7318864e-323f-3619-1a9c-c6aee4e5582d,BootID:247dfbca-e301-45ca-b5e7-bc2da79a6926,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 09:02:18.465: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-s7dx Nov 26 09:02:18.509: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-s7dx Nov 26 09:02:18.564: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-fjbz2 started at 2022-11-26 08:39:23 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container agnhost-container ready: false, restart count 5 Nov 
26 09:02:18.564: INFO: csi-mockplugin-0 started at 2022-11-26 08:31:19 +0000 UTC (0+4 container statuses recorded) Nov 26 09:02:18.564: INFO: Container busybox ready: true, restart count 8 Nov 26 09:02:18.564: INFO: Container csi-provisioner ready: false, restart count 9 Nov 26 09:02:18.564: INFO: Container driver-registrar ready: true, restart count 9 Nov 26 09:02:18.564: INFO: Container mock ready: true, restart count 9 Nov 26 09:02:18.564: INFO: pod-645ca7c3-0782-49c3-9f8c-fe2716c177bc started at 2022-11-26 08:39:40 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container write-pod ready: false, restart count 0 Nov 26 09:02:18.564: INFO: csi-mockplugin-0 started at 2022-11-26 08:48:01 +0000 UTC (0+3 container statuses recorded) Nov 26 09:02:18.564: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 09:02:18.564: INFO: Container driver-registrar ready: true, restart count 4 Nov 26 09:02:18.564: INFO: Container mock ready: true, restart count 3 Nov 26 09:02:18.564: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-bb2lb started at 2022-11-26 08:38:41 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container agnhost-container ready: true, restart count 7 Nov 26 09:02:18.564: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:52:00 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:18.564: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 09:02:18.564: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 09:02:18.564: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 09:02:18.564: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 09:02:18.564: INFO: Container hostpath ready: true, restart count 0 Nov 26 09:02:18.564: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 09:02:18.564: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 09:02:18.564: INFO: konnectivity-agent-m6kcz started at 2022-11-26 08:28:34 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container konnectivity-agent ready: false, restart count 9 Nov 26 09:02:18.564: INFO: test-container-pod started at 2022-11-26 08:57:38 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container webserver ready: true, restart count 0 Nov 26 09:02:18.564: INFO: pod-6fb046f8-5358-4deb-960a-bec3c718a5d1 started at 2022-11-26 08:47:33 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container write-pod ready: false, restart count 0 Nov 26 09:02:18.564: INFO: kube-proxy-bootstrap-e2e-minion-group-s7dx started at 2022-11-26 08:28:22 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container kube-proxy ready: false, restart count 9 Nov 26 09:02:18.564: INFO: pod-subpath-test-inlinevolume-4dbw started at 2022-11-26 08:39:23 +0000 UTC (1+2 container statuses recorded) Nov 26 09:02:18.564: INFO: Init container init-volume-inlinevolume-4dbw ready: true, restart count 0 Nov 26 09:02:18.564: INFO: Container test-container-subpath-inlinevolume-4dbw ready: false, restart count 8 Nov 26 09:02:18.564: INFO: Container test-container-volume-inlinevolume-4dbw ready: true, restart count 8 Nov 26 09:02:18.564: INFO: hostexec-bootstrap-e2e-minion-group-s7dx-8q696 started at 2022-11-26 08:39:23 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container agnhost-container ready: true, restart count 7 Nov 26 09:02:18.564: INFO: netserver-2 started at 2022-11-26 08:32:54 +0000 UTC (0+1 container 
statuses recorded) Nov 26 09:02:18.564: INFO: Container webserver ready: true, restart count 7 Nov 26 09:02:18.564: INFO: csi-hostpathplugin-0 started at 2022-11-26 08:51:35 +0000 UTC (0+7 container statuses recorded) Nov 26 09:02:18.564: INFO: Container csi-attacher ready: false, restart count 3 Nov 26 09:02:18.564: INFO: Container csi-provisioner ready: false, restart count 3 Nov 26 09:02:18.564: INFO: Container csi-resizer ready: false, restart count 3 Nov 26 09:02:18.564: INFO: Container csi-snapshotter ready: false, restart count 3 Nov 26 09:02:18.564: INFO: Container hostpath ready: false, restart count 3 Nov 26 09:02:18.564: INFO: Container liveness-probe ready: false, restart count 3 Nov 26 09:02:18.564: INFO: Container node-driver-registrar ready: false, restart count 3 Nov 26 09:02:18.564: INFO: pause-pod-deployment-7c665f9d5d-vbtlb started at 2022-11-26 08:52:00 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container agnhost-pause ready: false, restart count 4 Nov 26 09:02:18.564: INFO: csi-mockplugin-0 started at 2022-11-26 08:49:20 +0000 UTC (0+4 container statuses recorded) Nov 26 09:02:18.564: INFO: Container busybox ready: true, restart count 3 Nov 26 09:02:18.564: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 09:02:18.564: INFO: Container driver-registrar ready: true, restart count 3 Nov 26 09:02:18.564: INFO: Container mock ready: true, restart count 3 Nov 26 09:02:18.564: INFO: external-local-lb-kvgl4 started at 2022-11-26 08:51:20 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container netexec ready: false, restart count 5 Nov 26 09:02:18.564: INFO: netserver-2 started at 2022-11-26 08:57:15 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container webserver ready: false, restart count 1 Nov 26 09:02:18.564: INFO: metadata-proxy-v0.1-m5q9x started at 2022-11-26 08:28:23 +0000 UTC (0+2 container statuses recorded) Nov 26 09:02:18.564: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 09:02:18.564: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 09:02:18.564: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 08:48:01 +0000 UTC (0+1 container statuses recorded) Nov 26 09:02:18.564: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 09:02:18.813: INFO: Latency metrics for node bootstrap-e2e-minion-group-s7dx [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "esipp-3250" for this suite. 11/26/22 09:02:18.813
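The per-pod lines above ("Container <name> ready: <bool>, restart count <n>") are produced by the framework's namespace-dump step, which lists the pods scheduled to each node and prints every container status. A minimal sketch of that walk using client-go follows; the function name is invented for illustration, the kubeconfig path and node name are taken from this log, and this is not the framework's actual implementation.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// dumpContainerStatuses mirrors the "Container <name> ready: <bool>,
// restart count <n>" lines in the log: for every pod on the node it
// prints each container's readiness and restart count.
func dumpContainerStatuses(ctx context.Context, cs kubernetes.Interface, node string) error {
	pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{
		FieldSelector: "spec.nodeName=" + node, // only pods scheduled to this node
	})
	if err != nil {
		return err
	}
	for _, pod := range pods.Items {
		fmt.Printf("%s started at %s (%d+%d container statuses recorded)\n",
			pod.Name, pod.Status.StartTime,
			len(pod.Status.InitContainerStatuses), len(pod.Status.ContainerStatuses))
		for _, c := range pod.Status.ContainerStatuses {
			fmt.Printf("Container %s ready: %t, restart count %d\n", c.Name, c.Ready, c.RestartCount)
		}
	}
	return nil
}

func main() {
	// Kubeconfig path and node name are taken from this log for illustration.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	if err := dumpContainerStatuses(context.Background(), cs, "bootstrap-e2e-minion-group-s7dx"); err != nil {
		panic(err)
	}
}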
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\sbe\sable\sto\schange\sthe\stype\sand\sports\sof\sa\sTCP\sservice\s\[Slow\]$'
test/e2e/framework/service/util.go:48
k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTPWithRetriableErrorCodes({0xc005259220, 0xc}, 0x7c85, {0xae73300, 0x0, 0x0}, 0x1?)
	test/e2e/framework/service/util.go:48 +0x265
k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTP(...)
	test/e2e/framework/service/util.go:29
k8s.io/kubernetes/test/e2e/network.glob..func19.3()
	test/e2e/network/loadbalancer.go:120 +0x465
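The trace above ends in TestReachableHTTPWithRetriableErrorCodes, which repeatedly pokes the service URL and treats connection-level failures (the "connection refused" errors that fill the log below) as retriable until a timeout expires. A self-contained sketch of that poll pattern in plain Go; the helper name, interval, and timeout are assumptions for illustration, not the framework's exact values.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// pokeUntilReachable polls url until it answers 200 OK or the timeout
// elapses. Dial errors such as "connection refused" are treated as
// retriable, matching the repeated Poke(...) lines in the log.
func pokeUntilReachable(url string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	var lastErr error
	for time.Now().Before(deadline) {
		resp, err := http.Get(url)
		if err != nil {
			lastErr = err // retriable: the endpoint is not accepting connections yet
		} else {
			code := resp.StatusCode
			resp.Body.Close()
			if code == http.StatusOK {
				return nil // the service finally answered
			}
			lastErr = fmt.Errorf("unexpected status %d", code)
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("giving up on %q after %v: %v", url, timeout, lastErr)
}

func main() {
	// URL and ~2s cadence taken from the log below; the timeout is an assumption.
	if err := pokeUntilReachable("http://34.168.233.8:31877/echo?msg=hello", 2*time.Second, 5*time.Minute); err != nil {
		fmt.Println(err)
	}
}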
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 08:45:06.282 Nov 26 08:45:06.282: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/26/22 08:45:06.284 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 08:45:53.202 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 08:45:53.286 [BeforeEach] [sig-network] LoadBalancers test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:65 [It] should be able to change the type and ports of a TCP service [Slow] test/e2e/network/loadbalancer.go:77 Nov 26 08:45:55.462: INFO: namespace for TCP test: loadbalancers-5461 STEP: creating a TCP service mutability-test with type=ClusterIP in namespace loadbalancers-5461 11/26/22 08:45:55.511 Nov 26 08:45:55.558: INFO: service port TCP: 80 STEP: creating a pod to be part of the TCP service mutability-test 11/26/22 08:45:55.558 Nov 26 08:45:55.607: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 08:45:55.647: INFO: Found all 1 pods Nov 26 08:45:55.648: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [mutability-test-njz4b] Nov 26 08:45:55.648: INFO: Waiting up to 2m0s for pod "mutability-test-njz4b" in namespace "loadbalancers-5461" to be "running and ready" Nov 26 08:45:55.688: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 40.754268ms Nov 26 08:45:55.688: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:45:57.766: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.118164469s Nov 26 08:45:57.766: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:45:59.740: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 4.092451705s Nov 26 08:45:59.740: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:01.774: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 6.126675499s Nov 26 08:46:01.774: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:03.828: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 8.180842038s Nov 26 08:46:03.828: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:05.740: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 10.092158277s Nov 26 08:46:05.740: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:07.747: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 12.099546421s Nov 26 08:46:07.747: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:09.748: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. 
Elapsed: 14.100472652s Nov 26 08:46:09.748: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:11.738: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 16.090891589s Nov 26 08:46:11.738: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:13.738: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 18.0908964s Nov 26 08:46:13.738: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:15.749: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 20.101145684s Nov 26 08:46:15.749: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:17.743: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 22.095560334s Nov 26 08:46:17.743: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:19.779: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 24.131017834s Nov 26 08:46:19.779: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:21.761: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 26.113760485s Nov 26 08:46:21.761: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:23.758: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 28.110363002s Nov 26 08:46:23.758: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:25.745: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 30.097487568s Nov 26 08:46:25.745: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:27.745: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 32.097387012s Nov 26 08:46:27.745: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:29.762: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 34.114112528s Nov 26 08:46:29.762: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:31.749: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 36.101744306s Nov 26 08:46:31.749: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:33.763: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. 
Elapsed: 38.115855568s Nov 26 08:46:33.763: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:35.738: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 40.090468159s Nov 26 08:46:35.738: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:37.749: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 42.101272026s Nov 26 08:46:37.749: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:39.746: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 44.098732162s Nov 26 08:46:39.746: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:41.762: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 46.114512319s Nov 26 08:46:41.762: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:43.746: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 48.098595689s Nov 26 08:46:43.746: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:45.753: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 50.105690015s Nov 26 08:46:45.753: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on '' to be 'Running' but was 'Pending' Nov 26 08:46:47.769: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 52.121691262s Nov 26 08:46:47.769: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on 'bootstrap-e2e-minion-group-lz41' to be 'Running' but was 'Pending' Nov 26 08:46:49.745: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 54.097785432s Nov 26 08:46:49.745: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on 'bootstrap-e2e-minion-group-lz41' to be 'Running' but was 'Pending' Nov 26 08:46:51.742: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 56.094653045s Nov 26 08:46:51.742: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on 'bootstrap-e2e-minion-group-lz41' to be 'Running' but was 'Pending' Nov 26 08:46:53.747: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 58.09909598s Nov 26 08:46:53.747: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on 'bootstrap-e2e-minion-group-lz41' to be 'Running' but was 'Pending' Nov 26 08:46:55.819: INFO: Pod "mutability-test-njz4b": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.171124637s Nov 26 08:46:55.819: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-njz4b' on 'bootstrap-e2e-minion-group-lz41' to be 'Running' but was 'Pending' Nov 26 08:46:57.817: INFO: Pod "mutability-test-njz4b": Phase="Running", Reason="", readiness=false. 
Elapsed: 1m2.16934923s
Nov 26 08:46:57.817: INFO: Error evaluating pod condition running and ready: pod 'mutability-test-njz4b' on 'bootstrap-e2e-minion-group-lz41' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:46:47 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:46:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:46:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:46:47 +0000 UTC }]
Nov 26 08:46:59.748: INFO: Pod "mutability-test-njz4b": Phase="Running", Reason="", readiness=true. Elapsed: 1m4.100177059s
Nov 26 08:46:59.748: INFO: Pod "mutability-test-njz4b" satisfied condition "running and ready"
Nov 26 08:46:59.748: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [mutability-test-njz4b]
STEP: changing the TCP service to type=NodePort 11/26/22 08:46:59.748
Nov 26 08:46:59.943: INFO: TCP node port: 31877
STEP: hitting the TCP service's NodePort 11/26/22 08:46:59.943
Nov 26 08:46:59.943: INFO: Poking "http://34.168.233.8:31877/echo?msg=hello"
Nov 26 08:46:59.983: INFO: Poke("http://34.168.233.8:31877/echo?msg=hello"): Get "http://34.168.233.8:31877/echo?msg=hello": dial tcp 34.168.233.8:31877: connect: connection refused
[... identical Poking/Poke pairs repeated every ~2s from 08:47:01 onward, every attempt failing with the same "dial tcp 34.168.233.8:31877: connect: connection refused" ...]
Nov 26 08:50:53.984: INFO: Poking "http://34.168.233.8:31877/echo?msg=hello"
Nov 26 08:50:54.023: INFO: Poke("http://34.168.233.8:31877/echo?msg=hello"): Get "http://34.168.233.8:31877/echo?msg=hello": dial tcp 34.168.233.8:31877: connect: connection refused
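The Poking/Poke pairs above come from the framework's HTTP reachability poll (TestReachableHTTP, visible in the progress-report stack below), which retries a GET against the NodePort until it succeeds or the timeout expires. As a rough standalone sketch — not the framework's actual code, which sits behind wait.PollImmediate — the same pattern in plain Go, with this run's URL, ~2s cadence, and 5m budget, looks like:

// pokeloop.go: minimal sketch of the poll-until-reachable pattern seen above.
// URL, interval, and timeout mirror this run's log; error handling simplified.
package main

import (
	"fmt"
	"net/http"
	"time"
)

// poke performs one GET, corresponding to one "Poking ..." log entry.
func poke(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err // e.g. "dial tcp ...: connect: connection refused"
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	const url = "http://34.168.233.8:31877/echo?msg=hello"
	deadline := time.Now().Add(5 * time.Minute) // matches the 5m0s budget in the eventual FAIL
	for {
		err := poke(url)
		if err == nil {
			fmt.Println("service reachable")
			return
		}
		fmt.Printf("poke failed: %v\n", err)
		if time.Now().After(deadline) {
			fmt.Println("could not reach HTTP service within 5m")
			return
		}
		time.Sleep(2 * time.Second) // the ~2s cadence visible in the timestamps above
	}
}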
------------------------------
Progress Report for Ginkgo Process #11
Automatically polling progress: [sig-network] LoadBalancers should be able to change the type and ports of a TCP service [Slow] (Spec Runtime: 5m49.136s)
  test/e2e/network/loadbalancer.go:77
  In [It] (Node Runtime: 5m0s)
    test/e2e/network/loadbalancer.go:77
    At [By Step] hitting the TCP service's NodePort (Step Runtime: 3m55.475s)
      test/e2e/network/loadbalancer.go:119
Spec Goroutine
goroutine 1562 [select]
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00013a000}, 0xc000f8c108, 0x2fdb16a?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00013a000}, 0xd0?, 0x2fd9d05?, 0x28?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc00013a000}, 0x2d?, 0xc0038bdc20?, 0x262a967?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x754e980?, 0xc004f88150?, 0x766a5c9?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTPWithRetriableErrorCodes({0xc005259220, 0xc}, 0x7c85, {0xae73300, 0x0, 0x0}, 0x1?)
	test/e2e/framework/service/util.go:46
k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTP(...)
	test/e2e/framework/service/util.go:29
> k8s.io/kubernetes/test/e2e/network.glob..func19.3()
	test/e2e/network/loadbalancer.go:120
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0044e9380})
	vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
	vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
	vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
[... the same progress report, with the identical goroutine stack, was emitted again at Spec Runtime 6m9.139s, 6m29.141s, and 6m49.144s (Node Runtime up to 6m0.008s, Step Runtime up to 4m55.483s), interleaved with the same failing Poking/Poke pairs every ~2s from 08:50:55 through 08:51:54 ...]
Nov 26 08:51:55.984: INFO: Poking "http://34.168.233.8:31877/echo?msg=hello"
Nov 26 08:51:56.023: INFO: Poke("http://34.168.233.8:31877/echo?msg=hello"): Get "http://34.168.233.8:31877/echo?msg=hello": dial tcp 34.168.233.8:31877: connect: connection refused
Nov 26 08:51:57.984: INFO: Poking "http://34.168.233.8:31877/echo?msg=hello"
Nov 26 08:51:58.024: INFO: Poke("http://34.168.233.8:31877/echo?msg=hello"): Get "http://34.168.233.8:31877/echo?msg=hello": dial tcp 34.168.233.8:31877: connect: connection refused
Nov 26 08:51:59.984: INFO: Poking "http://34.168.233.8:31877/echo?msg=hello"
Nov 26 08:52:00.023: INFO: Poke("http://34.168.233.8:31877/echo?msg=hello"): Get "http://34.168.233.8:31877/echo?msg=hello": dial tcp 34.168.233.8:31877: connect: connection refused
Nov 26 08:52:00.023: INFO: Poking "http://34.168.233.8:31877/echo?msg=hello"
Nov 26 08:52:00.062: INFO: Poke("http://34.168.233.8:31877/echo?msg=hello"): Get "http://34.168.233.8:31877/echo?msg=hello": dial tcp 34.168.233.8:31877: connect: connection refused
Nov 26 08:52:00.062: FAIL: Could not reach HTTP service through 34.168.233.8:31877 after 5m0s
Full Stack Trace
k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTPWithRetriableErrorCodes({0xc005259220, 0xc}, 0x7c85, {0xae73300, 0x0, 0x0}, 0x1?)
	test/e2e/framework/service/util.go:48 +0x265
k8s.io/kubernetes/test/e2e/framework/service.TestReachableHTTP(...)
	test/e2e/framework/service/util.go:29
k8s.io/kubernetes/test/e2e/network.glob..func19.3()
	test/e2e/network/loadbalancer.go:120 +0x465
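Note that every probe failed with "connection refused" — an active TCP reset — rather than a timeout, which points at nothing listening behind the node port (consistent with the netexec container kill/back-off events dumped below) rather than dropped packets. A hedged way to reproduce that raw distinction outside the framework is a plain TCP dial; the address here is just this run's node external IP and NodePort:

// dialcheck.go: standalone diagnostic sketch, not part of the e2e framework.
// "connection refused" means the SYN was answered with a RST (no listener or
// a REJECT rule); a dial timeout would instead suggest filtered/dropped packets.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	addr := "34.168.233.8:31877" // node external IP + NodePort from this run
	conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
	if err != nil {
		fmt.Printf("dial %s failed: %v\n", addr, err)
		return
	}
	conn.Close()
	fmt.Printf("dial %s succeeded: something is listening\n", addr)
}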
[AfterEach] [sig-network] LoadBalancers
  test/e2e/framework/node/init/init.go:32
Nov 26 08:52:00.063: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[AfterEach] [sig-network] LoadBalancers
  test/e2e/network/loadbalancer.go:71
Nov 26 08:52:00.322: INFO: Output of kubectl describe svc:
Nov 26 08:52:00.322: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.83.96.51 --kubeconfig=/workspace/.kube/config --namespace=loadbalancers-5461 describe svc --namespace=loadbalancers-5461'
Nov 26 08:52:00.789: INFO: stderr: ""
Nov 26 08:52:00.789: INFO: stdout:
Name:                     mutability-test
Namespace:                loadbalancers-5461
Labels:                   testid=mutability-test-d608f6b6-bb6c-40d6-8fa5-14464336211f
Annotations:              <none>
Selector:                 testid=mutability-test-d608f6b6-bb6c-40d6-8fa5-14464336211f
Type:                     NodePort
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.0.43.91
IPs:                      10.0.43.91
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  31877/TCP
Endpoints:                10.64.2.148:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>
[DeferCleanup (Each)] [sig-network] LoadBalancers
  test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-network] LoadBalancers
  dump namespaces | framework.go:196
STEP: dump namespace information after failure 11/26/22 08:52:00.789
STEP: Collecting events from namespace "loadbalancers-5461". 11/26/22 08:52:00.789
STEP: Found 9 events. 11/26/22 08:52:00.893
Nov 26 08:52:00.893: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for mutability-test-njz4b: { } Scheduled: Successfully assigned loadbalancers-5461/mutability-test-njz4b to bootstrap-e2e-minion-group-lz41
Nov 26 08:52:00.893: INFO: At 2022-11-26 08:45:55 +0000 UTC - event for mutability-test: {replication-controller } SuccessfulCreate: Created pod: mutability-test-njz4b
Nov 26 08:52:00.893: INFO: At 2022-11-26 08:46:48 +0000 UTC - event for mutability-test-njz4b: {kubelet bootstrap-e2e-minion-group-lz41} FailedMount: MountVolume.SetUp failed for volume "kube-api-access-bgwqx" : failed to sync configmap cache: timed out waiting for the condition
Nov 26 08:52:00.893: INFO: At 2022-11-26 08:46:51 +0000 UTC - event for mutability-test-njz4b: {kubelet bootstrap-e2e-minion-group-lz41} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Nov 26 08:52:00.893: INFO: At 2022-11-26 08:46:51 +0000 UTC - event for mutability-test-njz4b: {kubelet bootstrap-e2e-minion-group-lz41} Created: Created container netexec
Nov 26 08:52:00.893: INFO: At 2022-11-26 08:46:51 +0000 UTC - event for mutability-test-njz4b: {kubelet bootstrap-e2e-minion-group-lz41} Started: Started container netexec
Nov 26 08:52:00.893: INFO: At 2022-11-26 08:46:53 +0000 UTC - event for mutability-test-njz4b: {kubelet bootstrap-e2e-minion-group-lz41} Killing: Stopping container netexec
Nov 26 08:52:00.893: INFO: At 2022-11-26 08:46:54 +0000 UTC - event for mutability-test-njz4b: {kubelet bootstrap-e2e-minion-group-lz41} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Nov 26 08:52:00.893: INFO: At 2022-11-26 08:46:59 +0000 UTC - event for mutability-test-njz4b: {kubelet bootstrap-e2e-minion-group-lz41} BackOff: Back-off restarting failed container netexec in pod mutability-test-njz4b_loadbalancers-5461(37228a39-3518-45d4-b1ed-a7841deaf7f6)
Nov 26 08:52:01.018: INFO: POD                    NODE                             PHASE    GRACE  CONDITIONS
Nov 26 08:52:01.018: INFO: mutability-test-njz4b  bootstrap-e2e-minion-group-lz41  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:46:47 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:47:13 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:47:13 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 08:46:47 +0000 UTC }]
Nov 26 08:52:01.018: INFO:
Nov 26 08:52:01.096: INFO: Unable to fetch loadbalancers-5461/mutability-test-njz4b/netexec logs: an error on the server ("unknown") has prevented the request from succeeding (get pods mutability-test-njz4b)
Nov 26 08:52:01.200: INFO: Logging node info for node bootstrap-e2e-master
Nov 26 08:52:01.261: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master ba8d154d-f7d1-4d02-b950-a084eb625244 11273 0 2022-11-26 08:28:16 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:16 +0000 UTC FieldsV1
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 08:49:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:49:04 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:49:04 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:49:04 +0000 UTC,LastTransitionTime:2022-11-26 08:28:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID 
available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:49:04 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.83.96.51,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bf41ff823483c389ad7dfd0c1ce16b06,SystemUUID:bf41ff82-3483-c389-ad7d-fd0c1ce16b06,BootID:5db3fc62-e7bb-4715-a04e-2bdf5328dbc8,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 08:52:01.261: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 08:52:01.331: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 08:52:01.408: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-master: error trying to reach service: No agent available Nov 26 08:52:01.408: INFO: Logging node info for node bootstrap-e2e-minion-group-327c Nov 26 08:52:01.475: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-327c 2792e2db-d60b-4a1a-b593-202ac7a81c7e 12313 0 2022-11-26 08:28:14 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 
beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-327c kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-327c topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-2377":"bootstrap-e2e-minion-group-327c","csi-hostpath-multivolume-6566":"bootstrap-e2e-minion-group-327c","csi-hostpath-provisioning-1740":"bootstrap-e2e-minion-group-327c","csi-hostpath-provisioning-3014":"bootstrap-e2e-minion-group-327c","csi-mock-csi-mock-volumes-4699":"bootstrap-e2e-minion-group-327c","csi-mock-csi-mock-volumes-5232":"bootstrap-e2e-minion-group-327c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 08:28:14 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {node-problem-detector Update v1 2022-11-26 08:48:23 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-26 08:48:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-26 08:51:22 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-327c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:48:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:48:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:48:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:48:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:48:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:48:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:48:23 +0000 UTC,LastTransitionTime:2022-11-26 08:28:18 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:24 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:48:50 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 08:48:50 +0000 
UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:48:50 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:48:50 +0000 UTC,LastTransitionTime:2022-11-26 08:28:14 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.233.8,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-327c.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:d5273c9e67ce602a784d93fca00549e5,SystemUUID:d5273c9e-67ce-602a-784d-93fca00549e5,BootID:6b605594-03f1-4a39-9bc1-bb9fc688da43,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 
registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4099^c6936341-6d64-11ed-9c72-1a6632c0d692,DevicePath:,},},Config:nil,},} Nov 26 08:52:01.475: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-327c Nov 26 08:52:01.544: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-327c Nov 26 08:52:01.645: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-327c: error trying to reach service: No agent available Nov 26 08:52:01.645: INFO: Logging node info for node bootstrap-e2e-minion-group-lz41 Nov 26 08:52:01.708: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-lz41 
2e433af5-3311-4285-97fa-cde6a9a5b261 12018 0 2022-11-26 08:28:20 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-lz41 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-lz41 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-8230":"bootstrap-e2e-minion-group-lz41","csi-hostpath-provisioning-9560":"bootstrap-e2e-minion-group-lz41"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:20 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 08:48:07 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 08:48:28 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 08:50:40 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-lz41,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:48:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:48:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:48:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:48:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:48:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:48:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:48:28 +0000 UTC,LastTransitionTime:2022-11-26 08:28:24 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:48:07 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 
08:48:07 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:48:07 +0000 UTC,LastTransitionTime:2022-11-26 08:28:20 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:48:07 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.83.179.153,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-lz41.c.k8s-jkns-e2e-gce-reboot-1-2.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:aa52222b7f092a36e5e364be7d47d224,SystemUUID:aa52222b-7f09-2a36-e5e3-64be7d47d224,BootID:74a831b5-c273-4958-8b03-2d43808117f5,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 
registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9 kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^ccbd2ab0-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-27^cd4cf99b-6d64-11ed-941f-2e36308fc1c9,DevicePath:,},},Config:nil,},} Nov 26 08:52:01.708: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-lz41 Nov 26 08:52:01.780: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-lz41 Nov 26 08:52:01.933: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-lz41: error trying to reach service: No agent available Nov 26 08:52:01.933: INFO: Logging node info for node bootstrap-e2e-minion-group-s7dx Nov 26 08:52:02.055: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-s7dx 94f7d8ed-1dd6-4a3f-9454-b3d54cd8c750 12831 0 2022-11-26 08:28:22 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true 
failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-s7dx kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-s7dx topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-5915":"bootstrap-e2e-minion-group-s7dx","csi-hostpath-provisioning-5957":"bootstrap-e2e-minion-group-s7dx","csi-hostpath-volumemode-3424":"bootstrap-e2e-minion-group-s7dx","csi-mock-csi-mock-volumes-3757":"csi-mock-csi-mock-volumes-3757"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 08:28:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 08:28:23 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {node-problem-detector Update v1 2022-11-26 08:48:31 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-26 08:51:43 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-26 08:52:01 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-e2e-gce-reboot-1-2/us-west1-b/bootstrap-e2e-minion-group-s7dx,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 08:48:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 08:48:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 08:48:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 08:48:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 08:48:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 08:48:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 08:48:31 +0000 UTC,LastTransitionTime:2022-11-26 08:28:27 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 08:28:34 +0000 UTC,LastTransitionTime:2022-11-26 08:28:34 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 08:51:52 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 
08:51:52 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 08:51:52 +0000 UTC,LastTransitionTime:2022-11-26 08:28:22 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 08:51:52 +0000 UTC,LastTransitionTime:2022-11-26 08:28:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready s
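A note on the repeated pod-listing failures in the dump above: each "Unable to retrieve kubelet pods for node ...: error trying to reach service: No agent available" line means the API server could not proxy the request through to that node's kubelet. On this cluster the kubelet traffic appears to be routed through the Konnectivity service (the node dumps list registry.k8s.io/kas-network-proxy/proxy-server and proxy-agent images), and "No agent available" is the error the proxy server returns when no agent has a tunnel open, so every kubelet-proxied request fails even while the nodes themselves report Ready. When that path is down, the pods bound to a node can still be listed straight from the API server's own store using a spec.nodeName field selector. The following is a minimal client-go sketch, not part of the original log; the node name is copied from the dump above and the default kubeconfig path is an assumption:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Assumption: kubeconfig at the default location (~/.kube/config).
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        // List pods bound to one node directly from the API server,
        // bypassing the kubelet /pods endpoint the proxy could not reach.
        // Node name taken from the node dump above.
        pods, err := client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(),
            metav1.ListOptions{FieldSelector: "spec.nodeName=bootstrap-e2e-minion-group-327c"})
        if err != nil {
            panic(err)
        }
        for _, p := range pods.Items {
            fmt.Printf("%s/%s\t%s\n", p.Namespace, p.Name, p.Status.Phase)
        }
    }

This queries the scheduler's view of pod placement from etcd via the API server rather than the kubelet's /pods endpoint, so it keeps working when the konnectivity tunnel is unavailable; the trade-off is that it shows what is bound to the node, not what the kubelet is actually running at that moment.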