go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-api\-machinery\]\sServers\swith\ssupport\sfor\sAPI\schunking\sshould\ssupport\scontinue\slisting\sfrom\sthe\slast\skey\sif\sthe\soriginal\sversion\shas\sbeen\scompacted\saway\,\sthough\sthe\slist\sis\sinconsistent\s\[Slow\]$'
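For context, the focused test above exercises API chunking: LIST requests issued page by page with a limit and a continue token. When the resource version encoded in a token has been compacted out of etcd, the server answers 410 Gone together with a fresh token that resumes from the last key at a newer resource version, so the combined result is no longer one consistent snapshot. A minimal client-go sketch of that client-side loop (resource, namespace, page size, and kubeconfig path are illustrative, not taken from the test):

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	opts := metav1.ListOptions{Limit: 25} // request one chunk at a time
	for {
		page, err := cs.CoreV1().Pods("default").List(context.TODO(), opts)
		if statusErr, ok := err.(*apierrors.StatusError); ok && apierrors.IsResourceExpired(err) {
			// 410 Gone: the continue token's resource version was compacted
			// away. The returned Status carries a fresh token that continues
			// from the last key at a newer resource version, which is the
			// "inconsistent list" behavior the focused test verifies.
			opts.Continue = statusErr.ErrStatus.ListMeta.Continue
			continue
		}
		if err != nil {
			panic(err)
		}
		for i := range page.Items {
			fmt.Println(page.Items[i].Name)
		}
		if page.Continue == "" {
			break // no more chunks
		}
		opts.Continue = page.Continue
	}
}

A client that needs a consistent snapshot would instead restart the list from the beginning rather than accept the inconsistent token.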
test/e2e/framework/framework.go:241
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000910780)
	test/e2e/framework/framework.go:241 +0x96f
from junit_01.xml
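The failure itself happens in framework setup, before any chunking logic runs: the freshly created namespace's "default" ServiceAccount never appears within the two-minute window (00:11:06.915 to 00:13:06.965 in the log below). A rough sketch of an equivalent wait, using client-go and apimachinery's wait helpers (the helper name, poll interval, and exact timeout are assumptions, not lifted from framework.go:241):

package sketch

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForDefaultServiceAccount (hypothetical helper) polls until the token
// controller has created the "default" ServiceAccount in ns; on timeout it
// wraps the error in the same shape as the FAIL line in the log below.
func waitForDefaultServiceAccount(ctx context.Context, cs kubernetes.Interface, ns string) error {
	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			_, err := cs.CoreV1().ServiceAccounts(ns).Get(ctx, "default", metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				return false, nil // not created yet; keep polling
			}
			return err == nil, err
		})
	if err != nil {
		return fmt.Errorf("wait for service account %q in namespace %q: %w", "default", ns, err)
	}
	return nil
}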
[BeforeEach] [sig-api-machinery] Servers with support for API chunking
  set up framework | framework.go:178
STEP: Creating a kubernetes client 11/26/22 00:11:06.913
Nov 26 00:11:06.913: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename chunking 11/26/22 00:11:06.915
Nov 26 00:13:06.965: INFO: Unexpected error:
<*fmt.wrapError | 0xc0014cc040>: {
    msg: "wait for service account \"default\" in namespace \"chunking-9244\": timed out waiting for the condition",
    err: <*errors.errorString | 0xc0001c9a20>{
        s: "timed out waiting for the condition",
    },
}
Nov 26 00:13:06.965: FAIL: wait for service account "default" in namespace "chunking-9244": timed out waiting for the condition

Full Stack Trace
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000910780)
	test/e2e/framework/framework.go:241 +0x96f
[AfterEach] [sig-api-machinery] Servers with support for API chunking
  test/e2e/framework/node/init/init.go:32
Nov 26 00:13:06.966: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[DeferCleanup (Each)] [sig-api-machinery] Servers with support for API chunking
  dump namespaces | framework.go:196
STEP: dump namespace information after failure 11/26/22 00:13:07.02
STEP: Collecting events from namespace "chunking-9244". 11/26/22 00:13:07.02
STEP: Found 0 events. 11/26/22 00:13:07.061
Nov 26 00:13:07.103: INFO: POD NODE PHASE GRACE CONDITIONS
Nov 26 00:13:07.103: INFO:
Nov 26 00:13:07.148: INFO: Logging node info for node bootstrap-e2e-master
Nov 26 00:13:07.191: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master fd5fd34c-05e8-4c7e-8cbe-bf91f0f95cea 7952 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 00:12:07 +0000 UTC FieldsV1
{"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.120.117,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:4d77311e15d4bb4a15d85de5a36cea94,SystemUUID:4d77311e-15d4-bb4a-15d8-5de5a36cea94,BootID:80daeaca-84b8-4927-98e9-a38242975836,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:124989749,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:57659704,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 00:13:07.192: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 00:13:07.236: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 00:13:07.289: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.289: INFO: Container kube-scheduler ready: true, restart count 5 Nov 26 00:13:07.289: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.289: INFO: Container kube-apiserver ready: true, restart count 2 Nov 26 00:13:07.289: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.289: INFO: Container kube-controller-manager ready: true, restart count 6 Nov 26 00:13:07.289: INFO: 
etcd-server-events-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.289: INFO: Container etcd-container ready: true, restart count 2 Nov 26 00:13:07.289: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.289: INFO: Container etcd-container ready: true, restart count 2 Nov 26 00:13:07.289: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.289: INFO: Container konnectivity-server-container ready: true, restart count 1 Nov 26 00:13:07.289: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.289: INFO: Container kube-addon-manager ready: true, restart count 1 Nov 26 00:13:07.289: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.289: INFO: Container l7-lb-controller ready: false, restart count 6 Nov 26 00:13:07.289: INFO: metadata-proxy-v0.1-thx76 started at 2022-11-25 23:56:34 +0000 UTC (0+2 container statuses recorded) Nov 26 00:13:07.289: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:13:07.289: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:13:07.492: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 00:13:07.492: INFO: Logging node info for node bootstrap-e2e-minion-group-4434 Nov 26 00:13:07.535: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-4434 1aba3539-104b-4667-ab07-196915781437 8040 0 2022-11-25 23:56:41 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-4434 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-4434 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1058":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2121":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-9066":"bootstrap-e2e-minion-group-4434","csi-hostpath-provisioning-8288":"bootstrap-e2e-minion-group-4434","csi-hostpath-provisioning-985":"bootstrap-e2e-minion-group-4434"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:08:33 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:11:46 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:12:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-4434,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:08:33 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:08:33 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:08:33 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:08:33 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.8.98,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:e4112b9ebf318dd47967311e73935166,SystemUUID:e4112b9e-bf31-8dd4-7967-311e73935166,BootID:519ea9fb-1f7c-420e-8cea-cf36b5a7caca,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},},Config:nil,},} Nov 26 00:13:07.536: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-4434 Nov 26 00:13:07.581: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-4434 Nov 26 00:13:07.724: INFO: metadata-proxy-v0.1-kdtvq started at 2022-11-25 23:56:42 +0000 UTC (0+2 container statuses recorded) Nov 26 00:13:07.724: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:13:07.724: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:13:07.724: INFO: pod-570aaca2-5565-4c62-89d3-a199c7b4ebbb started at 2022-11-25 23:58:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:13:07.724: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:02:52 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:07.724: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container hostpath ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 00:13:07.724: INFO: 
pod-secrets-b416252b-41f0-47a8-a1f1-2904f5649ea7 started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:13:07.724: INFO: hostexec-bootstrap-e2e-minion-group-4434-9kcrr started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container agnhost-container ready: false, restart count 5 Nov 26 00:13:07.724: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:29 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:07.724: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container hostpath ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 00:13:07.724: INFO: netserver-0 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container webserver ready: true, restart count 4 Nov 26 00:13:07.724: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:07.724: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container hostpath ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 00:13:07.724: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 00:13:07.724: INFO: csi-mockplugin-0 started at 2022-11-25 23:59:02 +0000 UTC (0+3 container statuses recorded) Nov 26 00:13:07.724: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 00:13:07.724: INFO: Container driver-registrar ready: false, restart count 4 Nov 26 00:13:07.724: INFO: Container mock ready: false, restart count 4 Nov 26 00:13:07.724: INFO: ss-1 started at 2022-11-26 00:01:13 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container webserver ready: true, restart count 5 Nov 26 00:13:07.724: INFO: pod-e24536f7-0c3d-44a2-ab47-cf68d9a28e12 started at 2022-11-26 00:04:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:13:07.724: INFO: nfs-server started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container nfs-server ready: true, restart count 2 Nov 26 00:13:07.724: INFO: pod-secrets-a50040d5-9c04-4844-ad21-907877e01b2f started at 2022-11-26 00:08:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:13:07.724: INFO: kube-proxy-bootstrap-e2e-minion-group-4434 started at 2022-11-25 23:56:41 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container kube-proxy ready: false, restart count 6 Nov 26 00:13:07.724: INFO: netserver-0 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 
00:13:07.724: INFO: Container webserver ready: false, restart count 6 Nov 26 00:13:07.724: INFO: pod-93ad783f-bd8c-43cd-b936-dc278433c338 started at 2022-11-26 00:04:42 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:13:07.724: INFO: addon-reconcile-test-qsrst started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container addon-reconcile-test ready: true, restart count 1 Nov 26 00:13:07.724: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:14 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:07.724: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 00:13:07.724: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 00:13:07.724: INFO: Container csi-resizer ready: true, restart count 4 Nov 26 00:13:07.724: INFO: Container csi-snapshotter ready: true, restart count 4 Nov 26 00:13:07.724: INFO: Container hostpath ready: true, restart count 4 Nov 26 00:13:07.724: INFO: Container liveness-probe ready: true, restart count 4 Nov 26 00:13:07.724: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 00:13:07.724: INFO: hostexec-bootstrap-e2e-minion-group-4434-4ctv8 started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:13:07.724: INFO: pvc-tester-hjwtq started at 2022-11-26 00:04:35 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:13:07.724: INFO: test-hostpath-type-f7tpn started at 2022-11-26 00:09:20 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:13:07.724: INFO: affinity-lb-esipp-transition-9jtt7 started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container affinity-lb-esipp-transition ready: true, restart count 1 Nov 26 00:13:07.724: INFO: hostexec-bootstrap-e2e-minion-group-4434-x8nd2 started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:13:07.724: INFO: mutability-test-cj5nq started at 2022-11-26 00:04:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container netexec ready: false, restart count 1 Nov 26 00:13:07.724: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:30 +0000 UTC (0+4 container statuses recorded) Nov 26 00:13:07.724: INFO: Container busybox ready: false, restart count 4 Nov 26 00:13:07.724: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container mock ready: false, restart count 5 Nov 26 00:13:07.724: INFO: hostexec-bootstrap-e2e-minion-group-4434-hc6kp started at 2022-11-26 00:08:02 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:13:07.724: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:14 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:07.724: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 00:13:07.724: 
INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container hostpath ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 00:13:07.724: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:59:16 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:07.724: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container hostpath ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 00:13:07.724: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:13:07.724: INFO: hostexec-bootstrap-e2e-minion-group-4434-2cwpc started at 2022-11-26 00:09:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:13:07.724: INFO: failure-2 started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container failure-2 ready: true, restart count 2 Nov 26 00:13:07.724: INFO: konnectivity-agent-9h6nk started at 2022-11-25 23:56:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container konnectivity-agent ready: false, restart count 6 Nov 26 00:13:07.724: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:29 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:07.724: INFO: Container csi-attacher ready: true, restart count 6 Nov 26 00:13:07.724: INFO: Container csi-provisioner ready: true, restart count 6 Nov 26 00:13:07.724: INFO: Container csi-resizer ready: true, restart count 6 Nov 26 00:13:07.724: INFO: Container csi-snapshotter ready: true, restart count 6 Nov 26 00:13:07.724: INFO: Container hostpath ready: true, restart count 6 Nov 26 00:13:07.724: INFO: Container liveness-probe ready: true, restart count 6 Nov 26 00:13:07.724: INFO: Container node-driver-registrar ready: true, restart count 6 Nov 26 00:13:07.724: INFO: pod-1b8bd600-f0ed-41f8-80b4-a6b12aef2c5d started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:07.724: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:13:08.074: INFO: Latency metrics for node bootstrap-e2e-minion-group-4434 Nov 26 00:13:08.074: INFO: Logging node info for node bootstrap-e2e-minion-group-51gr Nov 26 00:13:08.116: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-51gr 739f23c9-858a-495c-bf21-9f7320b53ec4 8030 0 2022-11-25 23:56:31 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-51gr kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-51gr topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-2541":"bootstrap-e2e-minion-group-51gr"} 
node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:31 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 00:09:44 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:11:37 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:12:32 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-51gr,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} 
{<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:39 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:33 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.82.95.192,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:c1fa84483022b650834cff54e6b41aff,SystemUUID:c1fa8448-3022-b650-834c-ff54e6b41aff,BootID:3164b9a2-246e-435e-be83-42c92c567f8b,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-2541^f0f7e720-6d1d-11ed-827c-ce22e910fefa],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-2541^f0f7e720-6d1d-11ed-827c-ce22e910fefa,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-2541^665b16fc-6d1e-11ed-827c-ce22e910fefa,DevicePath:,},},Config:nil,},} Nov 26 00:13:08.117: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-51gr Nov 26 00:13:08.164: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-51gr Nov 26 00:13:08.235: INFO: l7-default-backend-8549d69d99-97xrr started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 00:13:08.235: INFO: netserver-1 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container webserver ready: false, restart count 7 Nov 26 00:13:08.235: INFO: csi-hostpathplugin-0 started 
at 2022-11-25 23:58:15 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:08.235: INFO: Container csi-attacher ready: false, restart count 7 Nov 26 00:13:08.235: INFO: Container csi-provisioner ready: false, restart count 7 Nov 26 00:13:08.235: INFO: Container csi-resizer ready: false, restart count 7 Nov 26 00:13:08.235: INFO: Container csi-snapshotter ready: false, restart count 7 Nov 26 00:13:08.235: INFO: Container hostpath ready: false, restart count 7 Nov 26 00:13:08.235: INFO: Container liveness-probe ready: false, restart count 7 Nov 26 00:13:08.235: INFO: Container node-driver-registrar ready: false, restart count 7 Nov 26 00:13:08.235: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container csi-attacher ready: false, restart count 1 Nov 26 00:13:08.235: INFO: coredns-6d97d5ddb-7cmct started at 2022-11-25 23:56:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container coredns ready: false, restart count 7 Nov 26 00:13:08.235: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:00:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:08.235: INFO: Container csi-attacher ready: false, restart count 3 Nov 26 00:13:08.235: INFO: Container csi-provisioner ready: false, restart count 3 Nov 26 00:13:08.235: INFO: Container csi-resizer ready: false, restart count 3 Nov 26 00:13:08.235: INFO: Container csi-snapshotter ready: false, restart count 3 Nov 26 00:13:08.235: INFO: Container hostpath ready: false, restart count 3 Nov 26 00:13:08.235: INFO: Container liveness-probe ready: false, restart count 3 Nov 26 00:13:08.235: INFO: Container node-driver-registrar ready: false, restart count 3 Nov 26 00:13:08.235: INFO: hostexec-bootstrap-e2e-minion-group-51gr-jkx6v started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:13:08.235: INFO: pod-subpath-test-preprovisionedpv-zqzf started at 2022-11-26 00:04:41 +0000 UTC (1+2 container statuses recorded) Nov 26 00:13:08.235: INFO: Init container init-volume-preprovisionedpv-zqzf ready: true, restart count 4 Nov 26 00:13:08.235: INFO: Container test-container-subpath-preprovisionedpv-zqzf ready: false, restart count 5 Nov 26 00:13:08.235: INFO: Container test-container-volume-preprovisionedpv-zqzf ready: false, restart count 5 Nov 26 00:13:08.235: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:08.235: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 00:13:08.235: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:13:08.235: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 00:13:08.235: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 00:13:08.235: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:13:08.235: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 00:13:08.235: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:13:08.235: INFO: pvc-volume-tester-d9qcf started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:13:08.235: INFO: kube-dns-autoscaler-5f6455f985-7kdrd started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container autoscaler ready: false, 
restart count 6 Nov 26 00:13:08.235: INFO: netserver-1 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container webserver ready: false, restart count 4 Nov 26 00:13:08.235: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded) Nov 26 00:13:08.235: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:13:08.235: INFO: Container driver-registrar ready: true, restart count 1 Nov 26 00:13:08.235: INFO: Container mock ready: true, restart count 1 Nov 26 00:13:08.235: INFO: csi-mockplugin-attacher-0 started at 2022-11-25 23:58:36 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:13:08.235: INFO: pvc-volume-tester-d2gcf started at 2022-11-26 00:06:59 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container volume-tester ready: true, restart count 0 Nov 26 00:13:08.235: INFO: hostexec-bootstrap-e2e-minion-group-51gr-gncwt started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:13:08.235: INFO: affinity-lb-esipp-transition-gjpq2 started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container affinity-lb-esipp-transition ready: true, restart count 1 Nov 26 00:13:08.235: INFO: kube-proxy-bootstrap-e2e-minion-group-51gr started at 2022-11-25 23:56:31 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container kube-proxy ready: false, restart count 6 Nov 26 00:13:08.235: INFO: coredns-6d97d5ddb-6vx5m started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container coredns ready: false, restart count 6 Nov 26 00:13:08.235: INFO: konnectivity-agent-sg59x started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container konnectivity-agent ready: true, restart count 6 Nov 26 00:13:08.235: INFO: csi-mockplugin-0 started at 2022-11-25 23:58:36 +0000 UTC (0+3 container statuses recorded) Nov 26 00:13:08.235: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:13:08.235: INFO: Container driver-registrar ready: false, restart count 6 Nov 26 00:13:08.235: INFO: Container mock ready: false, restart count 6 Nov 26 00:13:08.235: INFO: metadata-proxy-v0.1-9xnlr started at 2022-11-25 23:56:32 +0000 UTC (0+2 container statuses recorded) Nov 26 00:13:08.235: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:13:08.235: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:13:08.235: INFO: volume-snapshot-controller-0 started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container volume-snapshot-controller ready: true, restart count 5 Nov 26 00:13:08.235: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:29 +0000 UTC (0+4 container statuses recorded) Nov 26 00:13:08.235: INFO: Container busybox ready: false, restart count 5 Nov 26 00:13:08.235: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 00:13:08.235: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 00:13:08.235: INFO: Container mock ready: false, restart count 5 Nov 26 00:13:08.235: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:00 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:08.235: INFO: Container 
csi-attacher ready: false, restart count 4 Nov 26 00:13:08.235: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 00:13:08.235: INFO: Container csi-resizer ready: false, restart count 4 Nov 26 00:13:08.235: INFO: Container csi-snapshotter ready: false, restart count 4 Nov 26 00:13:08.235: INFO: Container hostpath ready: false, restart count 4 Nov 26 00:13:08.235: INFO: Container liveness-probe ready: false, restart count 4 Nov 26 00:13:08.235: INFO: Container node-driver-registrar ready: false, restart count 4 Nov 26 00:13:08.235: INFO: ss-2 started at 2022-11-26 00:01:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.235: INFO: Container webserver ready: false, restart count 6 Nov 26 00:13:08.495: INFO: Latency metrics for node bootstrap-e2e-minion-group-51gr Nov 26 00:13:08.495: INFO: Logging node info for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:13:08.539: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-pjt7 5f55dd6b-a4d8-42f3-9e85-83e83c8dc9de 8082 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-pjt7 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-pjt7 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-6349":"bootstrap-e2e-minion-group-pjt7","csi-mock-csi-mock-volumes-8391":"bootstrap-e2e-minion-group-pjt7"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:07:59 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:11:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:12:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-pjt7,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning 
properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:10:35 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:10:35 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:10:35 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:10:35 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.105.124.11,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:f7ade85e43e2873500c8f33f09edf4a9,SystemUUID:f7ade85e-43e2-8735-00c8-f33f09edf4a9,BootID:07ab1c04-9bf6-4a67-bfa8-8d3160253b07,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 
registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9 kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6,DevicePath:,},},Config:nil,},} Nov 26 00:13:08.540: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:13:08.594: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-pjt7 Nov 26 00:13:08.675: INFO: test-hostpath-type-n5z6m started at 2022-11-26 00:04:48 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:13:08.675: INFO: pod-secrets-890a9a5b-57be-471c-8757-4aad820ed6d0 started at 2022-11-25 23:58:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:13:08.675: INFO: var-expansion-5d5e62ea-c0e8-4fb3-be3d-1c786f246364 started at 2022-11-26 00:01:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container dapi-container ready: false, restart count 0 Nov 26 00:13:08.675: INFO: hostpath-injector started at 2022-11-25 23:58:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container hostpath-injector ready: false, restart count 0 Nov 26 00:13:08.675: INFO: affinity-lb-esipp-transition-wvltg started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container affinity-lb-esipp-transition ready: true, restart count 3 Nov 26 00:13:08.675: INFO: external-provisioner-p6q4d started at 2022-11-26 00:04:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container nfs-provisioner ready: true, restart count 5 Nov 26 00:13:08.675: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-9qlmb started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:13:08.675: INFO: pod-configmaps-39454904-1ea1-4326-806f-d840f1ec6aab started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:13:08.675: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:08.675: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 00:13:08.675: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:13:08.675: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 00:13:08.675: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 00:13:08.675: INFO: Container hostpath ready: false, restart count 5 Nov 26 00:13:08.675: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 00:13:08.675: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 00:13:08.675: INFO: test-hostpath-type-whtq5 started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container 
host-path-testing ready: false, restart count 0 Nov 26 00:13:08.675: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:48 +0000 UTC (0+7 container statuses recorded) Nov 26 00:13:08.675: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:13:08.675: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:13:08.675: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 00:13:08.675: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 00:13:08.675: INFO: Container hostpath ready: true, restart count 3 Nov 26 00:13:08.675: INFO: Container liveness-probe ready: true, restart count 3 Nov 26 00:13:08.675: INFO: Container node-driver-registrar ready: true, restart count 3 Nov 26 00:13:08.675: INFO: netserver-2 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container webserver ready: true, restart count 1 Nov 26 00:13:08.675: INFO: test-hostpath-type-9nghx started at 2022-11-26 00:04:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:13:08.675: INFO: metrics-server-v0.5.2-867b8754b9-c8h52 started at 2022-11-25 23:57:03 +0000 UTC (0+2 container statuses recorded) Nov 26 00:13:08.675: INFO: Container metrics-server ready: false, restart count 6 Nov 26 00:13:08.675: INFO: Container metrics-server-nanny ready: false, restart count 7 Nov 26 00:13:08.675: INFO: pod-back-off-image started at 2022-11-26 00:00:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container back-off ready: false, restart count 7 Nov 26 00:13:08.675: INFO: pod-configmaps-607fb46f-a546-474e-99da-bccf05cace4e started at 2022-11-25 23:59:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:13:08.675: INFO: inclusterclient started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container inclusterclient ready: false, restart count 0 Nov 26 00:13:08.675: INFO: pod-subpath-test-inlinevolume-xjdn started at 2022-11-26 00:04:26 +0000 UTC (1+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Init container init-volume-inlinevolume-xjdn ready: true, restart count 0 Nov 26 00:13:08.675: INFO: Container test-container-subpath-inlinevolume-xjdn ready: false, restart count 0 Nov 26 00:13:08.675: INFO: pvc-volume-tester-wmxdq started at 2022-11-26 00:04:47 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:13:08.675: INFO: ss-0 started at 2022-11-25 23:59:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container webserver ready: false, restart count 6 Nov 26 00:13:08.675: INFO: external-local-nodeport-dhpjs started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container netexec ready: false, restart count 3 Nov 26 00:13:08.675: INFO: pod-cc7edce3-35cc-4f45-bad6-a784001395c6 started at 2022-11-26 00:00:17 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:13:08.675: INFO: konnectivity-agent-ft6wq started at 2022-11-25 23:56:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container konnectivity-agent ready: false, restart count 6 Nov 26 00:13:08.675: INFO: netserver-2 started at 2022-11-25 23:58:12 +0000 UTC (0+1 
container statuses recorded) Nov 26 00:13:08.675: INFO: Container webserver ready: true, restart count 7 Nov 26 00:13:08.675: INFO: pod-configmaps-283e1a65-2a1e-4f8e-9383-eeee204154b1 started at 2022-11-25 23:58:30 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:13:08.675: INFO: addon-reconcile-test-ftkd6 started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container addon-reconcile-test ready: true, restart count 1 Nov 26 00:13:08.675: INFO: test-hostpath-type-gjrf5 started at 2022-11-26 00:09:39 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:13:08.675: INFO: csi-mockplugin-0 started at 2022-11-26 00:08:03 +0000 UTC (0+4 container statuses recorded) Nov 26 00:13:08.675: INFO: Container busybox ready: true, restart count 2 Nov 26 00:13:08.675: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 00:13:08.675: INFO: Container driver-registrar ready: true, restart count 2 Nov 26 00:13:08.675: INFO: Container mock ready: true, restart count 2 Nov 26 00:13:08.675: INFO: nfs-server started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container nfs-server ready: true, restart count 4 Nov 26 00:13:08.675: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-wfjcd started at 2022-11-26 00:02:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:13:08.675: INFO: nfs-io-client started at 2022-11-26 00:04:25 +0000 UTC (1+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Init container nfs-io-init ready: true, restart count 0 Nov 26 00:13:08.675: INFO: Container nfs-io-client ready: false, restart count 0 Nov 26 00:13:08.675: INFO: test-hostpath-type-lx6tk started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:13:08.675: INFO: test-hostpath-type-245dt started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:13:08.675: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded) Nov 26 00:13:08.675: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:13:08.675: INFO: Container driver-registrar ready: true, restart count 3 Nov 26 00:13:08.675: INFO: Container mock ready: true, restart count 3 Nov 26 00:13:08.675: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 00:13:08.675: INFO: test-hostpath-type-24tqc started at 2022-11-26 00:09:42 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container host-path-testing ready: true, restart count 0 Nov 26 00:13:08.675: INFO: kube-proxy-bootstrap-e2e-minion-group-pjt7 started at 2022-11-25 23:56:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container kube-proxy ready: true, restart count 7 Nov 26 00:13:08.675: INFO: metadata-proxy-v0.1-9jgjn started at 2022-11-25 23:56:35 +0000 UTC (0+2 container statuses recorded) Nov 26 00:13:08.675: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:13:08.675: 
INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:13:08.675: INFO: pod-subpath-test-preprovisionedpv-92c8 started at 2022-11-26 00:02:55 +0000 UTC (1+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Init container init-volume-preprovisionedpv-92c8 ready: true, restart count 0 Nov 26 00:13:08.675: INFO: Container test-container-subpath-preprovisionedpv-92c8 ready: false, restart count 0 Nov 26 00:13:08.675: INFO: external-provisioner-5cfqt started at 2022-11-26 00:09:22 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container nfs-provisioner ready: true, restart count 0 Nov 26 00:13:08.675: INFO: test-hostpath-type-x56nj started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:13:08.675: INFO: Container host-path-sh-testing ready: false, restart count 0 Nov 26 00:13:08.962: INFO: Latency metrics for node bootstrap-e2e-minion-group-pjt7 [DeferCleanup (Each)] [sig-api-machinery] Servers with support for API chunking tear down framework | framework.go:193 STEP: Destroying namespace "chunking-9244" for this suite. 11/26/22 00:13:08.962
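The failure above is the e2e framework timing out after two minutes (00:11:06 to 00:13:06) while waiting for the "default" ServiceAccount to be provisioned in namespace chunking-9244; "timed out waiting for the condition" is the generic apimachinery wait error. A minimal client-go sketch of that kind of wait, with a hypothetical waitForDefaultServiceAccount helper rather than the framework's actual code:

```go
package sketch

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForDefaultServiceAccount polls until the "default" ServiceAccount
// exists in the namespace. If it never appears, PollImmediate returns
// "timed out waiting for the condition", the error wrapped in the
// failure above. (Hypothetical helper; interval/timeout are assumptions
// matching the two-minute window in the log.)
func waitForDefaultServiceAccount(c kubernetes.Interface, ns string) error {
	return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		_, err := c.CoreV1().ServiceAccounts(ns).Get(context.TODO(), "default", metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // not provisioned yet; keep polling
		}
		return err == nil, err
	})
}
```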
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sCronJob\sshould\snot\sschedule\sjobs\swhen\ssuspended\s\[Slow\]\s\[Conformance\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0001c5860) test/e2e/framework/framework.go:241 +0x96f from junit_01.xml
[BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:14:32.317 Nov 26 00:14:32.318: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename cronjob 11/26/22 00:14:32.319 Nov 26 00:14:32.359: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:34.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:36.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:38.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:40.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:42.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:44.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:46.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:48.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:50.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:52.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:54.399: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:56.398: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:58.399: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:00.399: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:02.399: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:02.438: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:02.438: INFO: Unexpected error: <*errors.errorString | 0xc000207ce0>: { s: "timed out waiting for the condition", } Nov 26 00:15:02.438: FAIL: timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0001c5860) 
test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 Nov 26 00:15:02.439: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:15:02.478 [DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193
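In this entry the namespace cannot even be created: every POST to https://34.168.120.117/api/v1/namespaces is refused for the whole ~30-second retry window, so the apiserver endpoint itself is down. The retry-on-create behaviour visible in the log can be sketched with client-go as follows (the 2s cadence and 30s budget are read off the timestamps above; the helper itself is an assumption, not the framework's real implementation):

```go
package sketch

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// createTestNamespace retries namespace creation while the apiserver is
// unreachable, logging-and-retrying the way the BeforeEach above does.
func createTestNamespace(c kubernetes.Interface, baseName string) (*corev1.Namespace, error) {
	var ns *corev1.Namespace
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		created, err := c.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{GenerateName: baseName + "-"},
		}, metav1.CreateOptions{})
		if err != nil {
			return false, nil // e.g. "connect: connection refused"; treat as transient and retry
		}
		ns = created
		return true, nil
	})
	return ns, err
}
```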
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sCronJob\sshould\snot\sschedule\snew\sjobs\swhen\sForbidConcurrent\s\[Slow\]\s\[Conformance\]$'
test/e2e/apps/cronjob.go:152 k8s.io/kubernetes/test/e2e/apps.glob..func2.3() test/e2e/apps/cronjob.go:152 +0xa3c There were additional failures detected after the initial failure: [FAILED] Nov 26 00:13:59.659: failed to list events in namespace "cronjob-6687": Get "https://34.168.120.117/api/v1/namespaces/cronjob-6687/events": dial tcp 34.168.120.117:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 00:13:59.699: Couldn't delete ns: "cronjob-6687": Delete "https://34.168.120.117/api/v1/namespaces/cronjob-6687": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/cronjob-6687", Err:(*net.OpError)(0xc0038ca910)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370 from junit_01.xml
[BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:09:48.57 Nov 26 00:09:48.570: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename cronjob 11/26/22 00:09:48.572 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 00:09:48.914 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 00:09:49.017 [BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 [It] should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] test/e2e/apps/cronjob.go:124 STEP: Creating a ForbidConcurrent cronjob 11/26/22 00:09:49.135 STEP: Ensuring a job is scheduled 11/26/22 00:09:49.296 STEP: Ensuring exactly one is scheduled 11/26/22 00:13:15.375 STEP: Ensuring exactly one running job exists by listing jobs explicitly 11/26/22 00:13:15.436 STEP: Ensuring no more jobs are scheduled 11/26/22 00:13:15.498 STEP: Removing cronjob 11/26/22 00:13:59.539 Nov 26 00:13:59.579: INFO: Unexpected error: Failed to delete CronJob forbid in namespace cronjob-6687: <*url.Error | 0xc001fa4c30>: { Op: "Delete", URL: "https://34.168.120.117/apis/batch/v1/namespaces/cronjob-6687/cronjobs/forbid", Err: <*net.OpError | 0xc0038ca640>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc002442600>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc0037f82e0>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:13:59.579: FAIL: Failed to delete CronJob forbid in namespace cronjob-6687: Delete "https://34.168.120.117/apis/batch/v1/namespaces/cronjob-6687/cronjobs/forbid": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/apps.glob..func2.3() test/e2e/apps/cronjob.go:152 +0xa3c [AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 Nov 26 00:13:59.579: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:13:59.619 STEP: Collecting events from namespace "cronjob-6687". 
11/26/22 00:13:59.619 Nov 26 00:13:59.658: INFO: Unexpected error: failed to list events in namespace "cronjob-6687": <*url.Error | 0xc001b64f60>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/cronjob-6687/events", Err: <*net.OpError | 0xc0041832c0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc002442b70>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc00445c4c0>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:13:59.659: FAIL: failed to list events in namespace "cronjob-6687": Get "https://34.168.120.117/api/v1/namespaces/cronjob-6687/events": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc00149a5c0, {0xc004456ab0, 0xc}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc001c4b040}, {0xc004456ab0, 0xc}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc00149a650?, {0xc004456ab0?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc000939860) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc0029b04f0?, 0xc003c59fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc000c4fdc8?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc0029b04f0?, 0x29449fc?}, {0xae73300?, 0xc003c59f80?, 0x2a6d786?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 STEP: Destroying namespace "cronjob-6687" for this suite. 11/26/22 00:13:59.659 Nov 26 00:13:59.698: FAIL: Couldn't delete ns: "cronjob-6687": Delete "https://34.168.120.117/api/v1/namespaces/cronjob-6687": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/cronjob-6687", Err:(*net.OpError)(0xc0038ca910)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc000939860) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc0029b0470?, 0x0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc0029b0470?, 0x7fc8c08?}, {0xae73300?, 0xc000c3e5b0?, 0xc000c565a8?}) /usr/local/go/src/reflect/value.go:368 +0xbc
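The `<syscall.Errno>0x6f` buried in these error dumps is decimal 111, ECONNREFUSED on Linux: the delete, the event listing, and the namespace teardown are all the same apiserver outage seen through different calls. Since url.Error, net.OpError, and os.SyscallError each implement Unwrap, the raw errno can be recovered with the standard errors package; a small sketch (the deleteForbidCronJob wrapper is hypothetical, the Delete call mirrors the test's cleanup):

```go
package sketch

import (
	"context"
	"errors"
	"fmt"
	"syscall"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// isConnectionRefused unwraps the url.Error -> net.OpError -> os.SyscallError
// chain from the dump down to the raw errno (0x6f == 111 == ECONNREFUSED).
func isConnectionRefused(err error) bool {
	var errno syscall.Errno
	return errors.As(err, &errno) && errno == syscall.ECONNREFUSED
}

func deleteForbidCronJob(c kubernetes.Interface, ns string) error {
	// The same batch/v1 delete the test's cleanup issues for the "forbid" CronJob.
	err := c.BatchV1().CronJobs(ns).Delete(context.TODO(), "forbid", metav1.DeleteOptions{})
	if isConnectionRefused(err) {
		return fmt.Errorf("apiserver endpoint is down: %w", err)
	}
	return err
}
```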
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sStatefulSet\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sScaling\sshould\shappen\sin\spredictable\sorder\sand\shalt\sif\sany\sstateful\spod\sis\sunhealthy\s\[Slow\]\s\[Conformance\]$'
test/e2e/framework/statefulset/rest.go:69 k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x801de88, 0xc0025d4000}, 0xc002486f00) test/e2e/framework/statefulset/rest.go:69 +0x153 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1() test/e2e/framework/statefulset/wait.go:37 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1b k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc000136000?}, 0x262a61f?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 +0x57 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc000d4c8e8, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 +0x10c k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x90?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 +0x9a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x1?, 0xc0011f5de0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc0025d4000?, 0xc0011f5e20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 +0x50 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc0025d4000}, 0x3, 0x3, 0xc002486f00) test/e2e/framework/statefulset/wait.go:35 +0xbd k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) test/e2e/framework/statefulset/wait.go:80 k8s.io/kubernetes/test/e2e/apps.glob..func10.2.10() test/e2e/apps/statefulset.go:643 +0x6d0 There were additional failures detected after the initial failure: [FAILED] Nov 26 00:03:03.632: Get "https://34.168.120.117/apis/apps/v1/namespaces/statefulset-6218/statefulsets": dial tcp 34.168.120.117:443: connect: connection refused In [AfterEach] at: test/e2e/framework/statefulset/rest.go:76 ---------- [FAILED] Nov 26 00:03:03.713: failed to list events in namespace "statefulset-6218": Get "https://34.168.120.117/api/v1/namespaces/statefulset-6218/events": dial tcp 34.168.120.117:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 00:03:03.753: Couldn't delete ns: "statefulset-6218": Delete "https://34.168.120.117/api/v1/namespaces/statefulset-6218": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/statefulset-6218", Err:(*net.OpError)(0xc003fa3b30)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370 from junit_01.xml
[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 STEP: Creating a kubernetes client 11/25/22 23:59:50.756 Nov 25 23:59:50.756: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename statefulset 11/25/22 23:59:50.758 STEP: Waiting for a default service account to be provisioned in namespace 11/25/22 23:59:50.926 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/25/22 23:59:51.021 [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-apps] StatefulSet test/e2e/apps/statefulset.go:98 [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:113 STEP: Creating service test in namespace statefulset-6218 11/25/22 23:59:51.118 [It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] test/e2e/apps/statefulset.go:587 STEP: Initializing watcher for selector baz=blah,foo=bar 11/25/22 23:59:51.186 STEP: Creating stateful set ss in namespace statefulset-6218 11/25/22 23:59:51.258 STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-6218 11/25/22 23:59:51.332 Nov 25 23:59:51.390: INFO: Found 0 stateful pods, waiting for 1 Nov 26 00:00:01.453: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:00:11.440: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:00:21.444: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:00:31.435: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:00:41.435: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:00:51.439: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod 11/26/22 00:00:51.439 Nov 26 00:00:51.482: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.120.117 --kubeconfig=/workspace/.kube/config --namespace=statefulset-6218 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 00:00:52.126: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" Nov 26 00:00:52.126: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" Nov 26 00:00:52.126: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' Nov 26 00:00:52.189: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true Nov 26 00:01:02.231: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false Nov 26 00:01:02.231: INFO: Waiting for statefulset status.replicas updated to 0 Nov 26 00:01:02.434: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999128s Nov 26 00:01:03.489: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.955075825s Nov 26 00:01:04.534: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.900746536s Nov 26 00:01:05.583: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.85585488s Nov 26 00:01:06.626: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.80633528s Nov 26 00:01:07.670: INFO: Verifying statefulset ss doesn't 
scale past 1 for another 4.763389366s Nov 26 00:01:08.715: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.719437174s Nov 26 00:01:09.763: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.674856117s Nov 26 00:01:10.808: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.626710454s Nov 26 00:01:11.852: INFO: Verifying statefulset ss doesn't scale past 1 for another 581.286823ms STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-6218 11/26/22 00:01:12.852 Nov 26 00:01:12.895: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.120.117 --kubeconfig=/workspace/.kube/config --namespace=statefulset-6218 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 00:01:13.468: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" Nov 26 00:01:13.468: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" Nov 26 00:01:13.468: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' Nov 26 00:01:13.511: INFO: Found 1 stateful pods, waiting for 3 Nov 26 00:01:23.556: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:01:23.557: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:01:33.571: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:01:33.571: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:01:43.555: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:01:43.555: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:01:43.555: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:01:53.556: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:01:53.556: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:01:53.556: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:02:03.555: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:02:03.555: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:02:03.555: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:02:13.556: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:02:13.556: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:02:13.556: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:02:23.555: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:02:23.555: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:02:23.555: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:02:33.562: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 00:02:33.562: INFO: Waiting for pod ss-1 to 
enter Running - Ready=true, currently Running - Ready=true Nov 26 00:02:33.562: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:02:43.554: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:02:53.874: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 00:03:03.551: INFO: Unexpected error: <*url.Error | 0xc002a07170>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/statefulset-6218/pods?labelSelector=baz%3Dblah%2Cfoo%3Dbar", Err: <*net.OpError | 0xc003fa3630>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0016c55f0>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc001610fc0>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:03:03.551: FAIL: Get "https://34.168.120.117/api/v1/namespaces/statefulset-6218/pods?labelSelector=baz%3Dblah%2Cfoo%3Dbar": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x801de88, 0xc0025d4000}, 0xc002486f00) test/e2e/framework/statefulset/rest.go:69 +0x153 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1() test/e2e/framework/statefulset/wait.go:37 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1b k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc000136000?}, 0x262a61f?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 +0x57 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc000d4c8e8, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 +0x10c k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x90?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 +0x9a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x1?, 0xc0011f5de0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc0025d4000?, 0xc0011f5e20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 +0x50 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc0025d4000}, 0x3, 0x3, 0xc002486f00) test/e2e/framework/statefulset/wait.go:35 +0xbd k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) test/e2e/framework/statefulset/wait.go:80 k8s.io/kubernetes/test/e2e/apps.glob..func10.2.10() test/e2e/apps/statefulset.go:643 +0x6d0 E1126 00:03:03.552365 8132 runtime.go:79] Observed a panic: types.GinkgoError{Heading:"Your Test Panicked", Message:"When you, or your assertion library, calls Ginkgo's Fail(),\nGinkgo panics to prevent subsequent assertions from running.\n\nNormally Ginkgo rescues this panic so you shouldn't see it.\n\nHowever, if you make an assertion in a goroutine, Ginkgo can't capture the panic.\nTo circumvent this, you should call\n\n\tdefer GinkgoRecover()\n\nat the top of the goroutine that caused this panic.\n\nAlternatively, you may have made an assertion outside of a Ginkgo\nleaf node (e.g. in a container node or some out-of-band function) - please move your assertion to\nan appropriate Ginkgo node (e.g. 
a BeforeSuite, BeforeEach, It, etc...).", DocLink:"mental-model-how-ginkgo-handles-failure", CodeLocation:types.CodeLocation{FileName:"test/e2e/framework/statefulset/rest.go", LineNumber:69, FullStackTrace:"k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x801de88, 0xc0025d4000}, 0xc002486f00)\n\ttest/e2e/framework/statefulset/rest.go:69 +0x153\nk8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1()\n\ttest/e2e/framework/statefulset/wait.go:37 +0x4a\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1b\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc000136000?}, 0x262a61f?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 +0x57\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc000d4c8e8, 0x2fdb16a?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 +0x10c\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x90?, 0x2fd9d05?, 0x20?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 +0x9a\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x1?, 0xc0011f5de0?, 0x262a967?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 +0x4a\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc0025d4000?, 0xc0011f5e20?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 +0x50\nk8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc0025d4000}, 0x3, 0x3, 0xc002486f00)\n\ttest/e2e/framework/statefulset/wait.go:35 +0xbd\nk8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...)\n\ttest/e2e/framework/statefulset/wait.go:80\nk8s.io/kubernetes/test/e2e/apps.glob..func10.2.10()\n\ttest/e2e/apps/statefulset.go:643 +0x6d0", CustomMessage:""}} (Your Test Panicked test/e2e/framework/statefulset/rest.go:69 When you, or your assertion library, calls Ginkgo's Fail(), Ginkgo panics to prevent subsequent assertions from running. Normally Ginkgo rescues this panic so you shouldn't see it. However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. To circumvent this, you should call defer GinkgoRecover() at the top of the goroutine that caused this panic. Alternatively, you may have made an assertion outside of a Ginkgo leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).
Learn more at: http://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure ) goroutine 1062 [running]: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/runtime.logPanic({0x70eb7e0?, 0xc0003b45b0}) vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go:75 +0x99 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/runtime.HandleCrash({0x0, 0x0, 0xc0003b45b0?}) vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go:49 +0x75 panic({0x70eb7e0, 0xc0003b45b0}) /usr/local/go/src/runtime/panic.go:884 +0x212 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2.Fail({0xc00183c6c0, 0xb8}, {0xc001b2d540?, 0x75b521a?, 0xc001b2d560?}) vendor/github.com/onsi/ginkgo/v2/core_dsl.go:352 +0x225 k8s.io/kubernetes/test/e2e/framework.Fail({0xc003f14000, 0xa3}, {0xc001b2d5d8?, 0xc003f14000?, 0xc001b2d600?}) test/e2e/framework/log.go:61 +0x145 k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fadf60, 0xc002a07170}, {0x0?, 0xc00516f1c0?, 0x10?}) test/e2e/framework/expect.go:76 +0x267 k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...) test/e2e/framework/expect.go:43 k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x801de88, 0xc0025d4000}, 0xc002486f00) test/e2e/framework/statefulset/rest.go:69 +0x153 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1() test/e2e/framework/statefulset/wait.go:37 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1b k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc000136000?}, 0x262a61f?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 +0x57 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc000136000}, 0xc000d4c8e8, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 +0x10c k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc000136000}, 0x90?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 +0x9a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc000136000}, 0x1?, 0xc0011f5de0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc0025d4000?, 0xc0011f5e20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 +0x50 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc0025d4000}, 0x3, 0x3, 0xc002486f00) test/e2e/framework/statefulset/wait.go:35 +0xbd k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...)
test/e2e/framework/statefulset/wait.go:80 k8s.io/kubernetes/test/e2e/apps.glob..func10.2.10() test/e2e/apps/statefulset.go:643 +0x6d0 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0013b1800}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 +0x1b k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 +0x98 created by k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 +0xe3d [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:124 Nov 26 00:03:03.592: INFO: Deleting all statefulset in ns statefulset-6218 Nov 26 00:03:03.632: INFO: Unexpected error: <*url.Error | 0xc002a076b0>: { Op: "Get", URL: "https://34.168.120.117/apis/apps/v1/namespaces/statefulset-6218/statefulsets", Err: <*net.OpError | 0xc003fa3900>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0016c58c0>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc001611400>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:03:03.632: FAIL: Get "https://34.168.120.117/apis/apps/v1/namespaces/statefulset-6218/statefulsets": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/statefulset.DeleteAllStatefulSets({0x801de88, 0xc0025d4000}, {0xc0012ab970, 0x10}) test/e2e/framework/statefulset/rest.go:76 +0x113 k8s.io/kubernetes/test/e2e/apps.glob..func10.2.2() test/e2e/apps/statefulset.go:129 +0x1b2 [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 Nov 26 00:03:03.633: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:03:03.672 STEP: Collecting events from namespace "statefulset-6218". 
11/26/22 00:03:03.673 Nov 26 00:03:03.712: INFO: Unexpected error: failed to list events in namespace "statefulset-6218": <*url.Error | 0xc0028e9620>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/statefulset-6218/events", Err: <*net.OpError | 0xc002247680>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0016c5e30>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc003529000>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:03:03.713: FAIL: failed to list events in namespace "statefulset-6218": Get "https://34.168.120.117/api/v1/namespaces/statefulset-6218/events": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc001b285c0, {0xc0012ab970, 0x10}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc0025d4000}, {0xc0012ab970, 0x10}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc001b28650?, {0xc0012ab970?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc000ad9ef0) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc0012a4f10?, 0xc0037e2fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc001841748?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc0012a4f10?, 0x29449fc?}, {0xae73300?, 0xc0037e2f80?, 0x0?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 STEP: Destroying namespace "statefulset-6218" for this suite. 11/26/22 00:03:03.713 Nov 26 00:03:03.753: FAIL: Couldn't delete ns: "statefulset-6218": Delete "https://34.168.120.117/api/v1/namespaces/statefulset-6218": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/statefulset-6218", Err:(*net.OpError)(0xc003fa3b30)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc000ad9ef0) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc0012a4e20?, 0x0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc0012a4e20?, 0x0?}, {0xae73300?, 0x0?, 0x0?}) /usr/local/go/src/reflect/value.go:368 +0xbc
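The trace above is worth decoding: statefulset.WaitForRunning drives a wait.PollImmediate loop whose condition lists the StatefulSet's pods through the apiserver, so once 34.168.120.117:443 started refusing connections, the list call failed and ExpectNoError turned that into the Ginkgo failure (and panic) shown. A minimal sketch of that polling pattern, assuming a plain client-go clientset rather than the e2e framework's own wrappers:

```go
// Sketch only: mirrors the WaitForRunning/GetPodList poll from the trace,
// not the framework's actual code.
package sketch

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForRunningPods polls until at least `expected` pods in ns are Running.
func waitForRunningPods(c kubernetes.Interface, ns string, expected int) error {
	return wait.PollImmediate(2*time.Second, 3*time.Minute, func() (bool, error) {
		pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			// A dial error like "connection refused" lands here.
			return false, fmt.Errorf("list pods in %q: %w", ns, err)
		}
		running := 0
		for _, p := range pods.Items {
			if p.Status.Phase == corev1.PodRunning {
				running++
			}
		}
		return running >= expected, nil
	})
}
```

One difference from the real framework: the sketch surfaces the list error through PollImmediate's return value, whereas GetPodList fails the test from inside the condition via ExpectNoError, which is why the log above shows a panic rather than a returned error.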
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-auth\]\sServiceAccounts\sshould\ssupport\sInClusterConfig\swith\stoken\srotation\s\[Slow\]$'
test/e2e/auth/service_accounts.go:497 k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:497 +0x877 There were additional failures detected after the initial failure: [FAILED] Nov 26 00:04:51.117: failed to list events in namespace "svcaccounts-6034": Get "https://34.168.120.117/api/v1/namespaces/svcaccounts-6034/events": dial tcp 34.168.120.117:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 00:04:51.157: Couldn't delete ns: "svcaccounts-6034": Delete "https://34.168.120.117/api/v1/namespaces/svcaccounts-6034": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/svcaccounts-6034", Err:(*net.OpError)(0xc003167e00)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370
from junit_01.xml
[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:03:00.385 Nov 26 00:03:00.385: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename svcaccounts 11/26/22 00:03:00.387 Nov 26 00:03:00.427: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:02.468: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:04.467: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:06.467: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:08.467: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:10.468: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:12.468: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:14.467: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:16.467: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:18.468: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:20.468: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:22.467: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:24.467: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:26.468: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:28.468: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 00:04:24.628 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 00:04:24.709 [BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 [It] should support InClusterConfig with token rotation [Slow] test/e2e/auth/service_accounts.go:432 Nov 26 00:04:24.916: INFO: created pod Nov 26 00:04:24.916: INFO: Waiting up to 1m0s for 1 pods to be running and ready: [inclusterclient] Nov 26 00:04:24.916: INFO: Waiting up to 1m0s for pod "inclusterclient" in namespace "svcaccounts-6034" to be "running and 
ready" Nov 26 00:04:24.997: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 80.918748ms Nov 26 00:04:24.997: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:27.066: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 2.150236512s Nov 26 00:04:27.066: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:29.158: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 4.242385035s Nov 26 00:04:29.158: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:31.160: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 6.244284712s Nov 26 00:04:31.160: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:33.106: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 8.190185264s Nov 26 00:04:33.106: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:35.133: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 10.217500857s Nov 26 00:04:35.134: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:37.115: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 12.198566084s Nov 26 00:04:37.115: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:39.092: INFO: Pod "inclusterclient": Phase="Failed", Reason="", readiness=false. Elapsed: 14.176328681s Nov 26 00:04:39.092: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Failed' Nov 26 00:04:41.053: INFO: Pod "inclusterclient": Phase="Failed", Reason="", readiness=false. Elapsed: 16.137137785s Nov 26 00:04:41.053: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Failed' Nov 26 00:04:43.075: INFO: Pod "inclusterclient": Phase="Failed", Reason="", readiness=false. Elapsed: 18.158966358s Nov 26 00:04:43.075: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Failed' Nov 26 00:04:45.083: INFO: Pod "inclusterclient": Phase="Failed", Reason="", readiness=false. Elapsed: 20.166961865s Nov 26 00:04:45.083: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Failed' Nov 26 00:04:47.061: INFO: Pod "inclusterclient": Phase="Failed", Reason="", readiness=false. 
Elapsed: 22.144965416s Nov 26 00:04:47.061: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Failed' Nov 26 00:04:49.096: INFO: Pod "inclusterclient": Phase="Failed", Reason="", readiness=false. Elapsed: 24.180031417s Nov 26 00:04:49.096: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Failed' Nov 26 00:04:51.037: INFO: Encountered non-retryable error while getting pod svcaccounts-6034/inclusterclient: Get "https://34.168.120.117/api/v1/namespaces/svcaccounts-6034/pods/inclusterclient": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:51.037: INFO: Pod inclusterclient failed to be running and ready. Nov 26 00:04:51.037: INFO: Wanted all 1 pods to be running and ready. Result: false. Pods: [inclusterclient] Nov 26 00:04:51.037: FAIL: pod "inclusterclient" in ns "svcaccounts-6034" never became ready Full Stack Trace k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:497 +0x877 [AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 Nov 26 00:04:51.037: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:04:51.077 STEP: Collecting events from namespace "svcaccounts-6034". 11/26/22 00:04:51.077 Nov 26 00:04:51.117: INFO: Unexpected error: failed to list events in namespace "svcaccounts-6034": <*url.Error | 0xc0032159b0>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/svcaccounts-6034/events", Err: <*net.OpError | 0xc003759d60>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc004846210>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc0012f2860>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:04:51.117: FAIL: failed to list events in namespace "svcaccounts-6034": Get "https://34.168.120.117/api/v1/namespaces/svcaccounts-6034/events": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc00115a5c0, {0xc0038d79c0, 0x10}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc003970680}, {0xc0038d79c0, 0x10}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc00115a650?, {0xc0038d79c0?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc000a763c0) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc001626cd0?, 0xc0037edfb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc0003db408?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc001626cd0?, 0x29449fc?}, {0xae73300?, 0xc0037edf80?, 0x3a212e4?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 STEP: Destroying namespace "svcaccounts-6034" for this suite. 
11/26/22 00:04:51.118 Nov 26 00:04:51.157: FAIL: Couldn't delete ns: "svcaccounts-6034": Delete "https://34.168.120.117/api/v1/namespaces/svcaccounts-6034": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/svcaccounts-6034", Err:(*net.OpError)(0xc003167e00)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc000a763c0) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc001626c20?, 0xae75720?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc0049d2e4d?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc001626c20?, 0x7fa141f890e0?}, {0xae73300?, 0x7892bc0?, 0x10000?}) /usr/local/go/src/reflect/value.go:368 +0xbc
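For context on what this [Slow] test exercises: the pod named "inclusterclient" above authenticates to the apiserver using only its mounted, auto-rotating service account token, and the test asserts the client keeps working across rotations. A rough sketch of what such a pod does, assuming plain client-go (names and intervals here are illustrative, not the test's actual implementation):

```go
// Sketch only: an in-cluster client exercising a rotating service account token.
package sketch

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// pollWithRotatedToken builds a client from the pod's mounted service account
// credentials and issues an authenticated request in a loop. client-go
// re-reads the projected token file as the kubelet rotates it, so a stale
// token would eventually show up here as 401 Unauthorized responses.
func pollWithRotatedToken(ctx context.Context, namespace string) error {
	cfg, err := rest.InClusterConfig() // token + CA come from the serviceaccount volume mount
	if err != nil {
		return err
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	for {
		// Assumes the pod's service account is allowed to list pods in its
		// own namespace; any authenticated request would serve the purpose.
		if _, err := cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}); err != nil {
			return fmt.Errorf("authenticated request failed: %w", err)
		}
		select {
		case <-ctx.Done():
			return nil
		case <-time.After(30 * time.Second):
		}
	}
}
```

In this run the pod never reached Running (Pending, then Failed), and the apiserver then became unreachable, so the rotation behavior itself was never actually exercised.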
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\sclient\sSimple\spod\sshould\sreturn\scommand\sexit\scodes\s\[Slow\]\srunning\sa\sfailing\scommand\swith\s\-\-leave\-stdin\-open$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000c162d0) test/e2e/framework/framework.go:241 +0x96f
from junit_01.xml
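The summary above and the full log below show the framework dying before the test body ever runs: BeforeEach builds a namespace and then waits for its "default" ServiceAccount to be provisioned, and that wait times out. A minimal sketch of such a wait, assuming a plain client-go clientset (the framework's actual helper differs in details such as retry handling):

```go
// Sketch only: wait for the "default" ServiceAccount in a fresh namespace.
package sketch

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForDefaultServiceAccount(c kubernetes.Interface, ns string) error {
	err := wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		_, err := c.CoreV1().ServiceAccounts(ns).Get(context.TODO(), "default", metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // not provisioned yet; keep polling
		}
		// Any other error aborts the poll; nil means the account exists.
		return err == nil, err
	})
	if err != nil {
		return fmt.Errorf("wait for service account %q in namespace %q: %w", "default", ns, err)
	}
	return nil
}
```

When the poll exhausts its timeout, wait returns "timed out waiting for the condition", and wrapping it as above yields exactly the FAIL message in the log that follows.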
[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:16:53.624 Nov 26 00:16:53.625: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename kubectl 11/26/22 00:16:53.626 Nov 26 00:18:53.675: INFO: Unexpected error: <*fmt.wrapError | 0xc005098060>: { msg: "wait for service account \"default\" in namespace \"kubectl-8411\": timed out waiting for the condition", err: <*errors.errorString | 0xc00017da10>{ s: "timed out waiting for the condition", }, } Nov 26 00:18:53.675: FAIL: wait for service account "default" in namespace "kubectl-8411": timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000c162d0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 Nov 26 00:18:53.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:18:53.722 STEP: Collecting events from namespace "kubectl-8411". 11/26/22 00:18:53.722 STEP: Found 0 events. 11/26/22 00:18:53.766 Nov 26 00:18:53.808: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 00:18:53.808: INFO: Nov 26 00:18:53.855: INFO: Logging node info for node bootstrap-e2e-master Nov 26 00:18:53.901: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master fd5fd34c-05e8-4c7e-8cbe-bf91f0f95cea 9334 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 00:17:13 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.120.117,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:4d77311e15d4bb4a15d85de5a36cea94,SystemUUID:4d77311e-15d4-bb4a-15d8-5de5a36cea94,BootID:80daeaca-84b8-4927-98e9-a38242975836,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:124989749,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:57659704,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 00:18:53.902: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 00:18:53.947: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 00:18:54.003: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.003: INFO: Container kube-scheduler ready: true, restart count 6 Nov 26 00:18:54.003: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.003: INFO: Container kube-apiserver ready: true, restart count 3 Nov 26 00:18:54.003: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.003: INFO: Container l7-lb-controller ready: true, restart count 7 Nov 26 00:18:54.003: INFO: 
metadata-proxy-v0.1-thx76 started at 2022-11-25 23:56:34 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:54.003: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:18:54.003: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:18:54.003: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.003: INFO: Container kube-controller-manager ready: false, restart count 6 Nov 26 00:18:54.003: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.003: INFO: Container etcd-container ready: true, restart count 2 Nov 26 00:18:54.003: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.003: INFO: Container etcd-container ready: true, restart count 2 Nov 26 00:18:54.003: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.003: INFO: Container konnectivity-server-container ready: true, restart count 1 Nov 26 00:18:54.003: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.003: INFO: Container kube-addon-manager ready: true, restart count 1 Nov 26 00:18:54.205: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 00:18:54.205: INFO: Logging node info for node bootstrap-e2e-minion-group-4434 Nov 26 00:18:54.247: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-4434 1aba3539-104b-4667-ab07-196915781437 9581 0 2022-11-25 23:56:41 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-4434 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-4434 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1058":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2121":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2486":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-9066":"bootstrap-e2e-minion-group-4434","csi-hostpath-provisioning-985":"bootstrap-e2e-minion-group-4434","csi-mock-csi-mock-volumes-2299":"bootstrap-e2e-minion-group-4434"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:08:33 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:16:47 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:18:45 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-4434,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:18:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:18:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:18:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:18:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.8.98,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:e4112b9ebf318dd47967311e73935166,SystemUUID:e4112b9e-bf31-8dd4-7967-311e73935166,BootID:519ea9fb-1f7c-420e-8cea-cf36b5a7caca,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},},Config:nil,},} Nov 26 00:18:54.248: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-4434 Nov 26 00:18:54.364: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-4434 Nov 26 00:18:54.481: INFO: kube-proxy-bootstrap-e2e-minion-group-4434 started at 2022-11-25 23:56:41 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container kube-proxy ready: false, restart count 7 Nov 26 00:18:54.481: INFO: netserver-0 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container webserver ready: false, restart count 7 Nov 26 00:18:54.481: INFO: pod-93ad783f-bd8c-43cd-b936-dc278433c338 started at 2022-11-26 00:04:42 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:54.481: INFO: hostexec-bootstrap-e2e-minion-group-4434-4ctv8 started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container agnhost-container ready: false, restart count 1 Nov 26 00:18:54.481: INFO: pvc-tester-hjwtq started at 2022-11-26 00:04:35 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:54.481: INFO: hostexec-bootstrap-e2e-minion-group-4434-x8nd2 started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container agnhost-container ready: 
true, restart count 2 Nov 26 00:18:54.481: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:30 +0000 UTC (0+4 container statuses recorded) Nov 26 00:18:54.481: INFO: Container busybox ready: false, restart count 5 Nov 26 00:18:54.481: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container driver-registrar ready: true, restart count 6 Nov 26 00:18:54.481: INFO: Container mock ready: true, restart count 6 Nov 26 00:18:54.481: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:14 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:54.481: INFO: Container csi-attacher ready: true, restart count 8 Nov 26 00:18:54.481: INFO: Container csi-provisioner ready: true, restart count 8 Nov 26 00:18:54.481: INFO: Container csi-resizer ready: true, restart count 8 Nov 26 00:18:54.481: INFO: Container csi-snapshotter ready: true, restart count 8 Nov 26 00:18:54.481: INFO: Container hostpath ready: true, restart count 8 Nov 26 00:18:54.481: INFO: Container liveness-probe ready: true, restart count 8 Nov 26 00:18:54.481: INFO: Container node-driver-registrar ready: true, restart count 8 Nov 26 00:18:54.481: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:59:16 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:54.481: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container node-driver-registrar ready: false, restart count 7 Nov 26 00:18:54.481: INFO: konnectivity-agent-9h6nk started at 2022-11-25 23:56:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container konnectivity-agent ready: false, restart count 7 Nov 26 00:18:54.481: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:29 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:54.481: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 00:18:54.481: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:18:54.481: INFO: pod-secrets-b416252b-41f0-47a8-a1f1-2904f5649ea7 started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:18:54.481: INFO: metadata-proxy-v0.1-kdtvq started at 2022-11-25 23:56:42 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:54.481: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:18:54.481: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:18:54.481: INFO: pod-570aaca2-5565-4c62-89d3-a199c7b4ebbb started at 2022-11-25 23:58:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:54.481: INFO: csi-hostpathplugin-0 
started at 2022-11-26 00:02:52 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:54.481: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container hostpath ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container liveness-probe ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container node-driver-registrar ready: true, restart count 3 Nov 26 00:18:54.481: INFO: test-hostpath-type-966d7 started at 2022-11-26 00:13:19 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:54.481: INFO: hostexec-bootstrap-e2e-minion-group-4434-9kcrr started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container agnhost-container ready: false, restart count 6 Nov 26 00:18:54.481: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:29 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:54.481: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container hostpath ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container liveness-probe ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 00:18:54.481: INFO: netserver-0 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container webserver ready: true, restart count 4 Nov 26 00:18:54.481: INFO: test-hostpath-type-cstjx started at 2022-11-26 00:13:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:54.481: INFO: nfs-server started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container nfs-server ready: false, restart count 2 Nov 26 00:18:54.481: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:54.481: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container hostpath ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container liveness-probe ready: true, restart count 3 Nov 26 00:18:54.481: INFO: Container node-driver-registrar ready: true, restart count 3 Nov 26 00:18:54.481: INFO: csi-mockplugin-0 started at 2022-11-25 23:59:02 +0000 UTC (0+3 container statuses recorded) Nov 26 00:18:54.481: INFO: Container csi-provisioner ready: true, restart count 6 Nov 26 00:18:54.481: INFO: Container driver-registrar ready: true, restart count 6 Nov 26 00:18:54.481: INFO: Container mock ready: true, restart count 6 Nov 26 00:18:54.481: INFO: ss-1 started at 2022-11-26 00:01:13 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container 
webserver ready: true, restart count 7 Nov 26 00:18:54.481: INFO: pod-e24536f7-0c3d-44a2-ab47-cf68d9a28e12 started at 2022-11-26 00:04:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:54.481: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:54.834: INFO: Latency metrics for node bootstrap-e2e-minion-group-4434 Nov 26 00:18:54.834: INFO: Logging node info for node bootstrap-e2e-minion-group-51gr Nov 26 00:18:54.879: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-51gr 739f23c9-858a-495c-bf21-9f7320b53ec4 9492 0 2022-11-25 23:56:31 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-51gr kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-51gr topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-6045":"bootstrap-e2e-minion-group-51gr","csi-hostpath-multivolume-855":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumeio-1998":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumemode-9682":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumemode-9999":"bootstrap-e2e-minion-group-51gr","csi-mock-csi-mock-volumes-2541":"bootstrap-e2e-minion-group-51gr"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:31 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 00:13:55 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:16:38 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:18:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-51gr,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning 
properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:39 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:33 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.82.95.192,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:c1fa84483022b650834cff54e6b41aff,SystemUUID:c1fa8448-3022-b650-834c-ff54e6b41aff,BootID:3164b9a2-246e-435e-be83-42c92c567f8b,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a 
registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-6045^2bdc44ff-6d1f-11ed-96fb-560e2640bdfc,DevicePath:,},},Config:nil,},} Nov 26 00:18:54.880: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-51gr Nov 26 00:18:54.925: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-51gr Nov 26 00:18:55.002: INFO: l7-default-backend-8549d69d99-97xrr started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 00:18:55.002: INFO: netserver-1 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container webserver ready: false, restart count 8 Nov 26 00:18:55.002: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:15 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-attacher ready: true, restart count 8 Nov 26 00:18:55.002: INFO: Container csi-provisioner ready: true, restart count 8 Nov 26 00:18:55.002: INFO: Container csi-resizer ready: true, restart count 8 Nov 26 00:18:55.002: INFO: Container csi-snapshotter ready: true, restart count 8 Nov 26 00:18:55.002: INFO: Container hostpath ready: true, restart count 8 Nov 26 00:18:55.002: INFO: Container liveness-probe ready: true, restart count 8 Nov 26 00:18:55.002: INFO: Container node-driver-registrar ready: true, restart count 8 Nov 26 00:18:55.002: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:55.002: INFO: coredns-6d97d5ddb-7cmct started at 2022-11-25 23:56:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container coredns ready: false, restart count 8 Nov 26 00:18:55.002: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:00:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-attacher ready: true, restart count 6 Nov 26 00:18:55.002: INFO: Container csi-provisioner ready: true, restart count 6 Nov 26 00:18:55.002: INFO: Container csi-resizer ready: true, restart count 6 Nov 26 00:18:55.002: INFO: Container csi-snapshotter ready: true, restart count 6 Nov 26 00:18:55.002: INFO: Container hostpath ready: true, restart count 6 Nov 26 00:18:55.002: INFO: Container liveness-probe ready: true, restart count 6 Nov 26 00:18:55.002: INFO: Container node-driver-registrar ready: true, restart count 6 Nov 26 00:18:55.002: INFO: hostexec-bootstrap-e2e-minion-group-51gr-6fnrz started at 2022-11-26 00:13:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container 
agnhost-container ready: true, restart count 2 Nov 26 00:18:55.002: INFO: pod-subpath-test-preprovisionedpv-zqzf started at 2022-11-26 00:04:41 +0000 UTC (1+2 container statuses recorded) Nov 26 00:18:55.002: INFO: Init container init-volume-preprovisionedpv-zqzf ready: true, restart count 4 Nov 26 00:18:55.002: INFO: Container test-container-subpath-preprovisionedpv-zqzf ready: true, restart count 6 Nov 26 00:18:55.002: INFO: Container test-container-volume-preprovisionedpv-zqzf ready: true, restart count 6 Nov 26 00:18:55.002: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-attacher ready: true, restart count 7 Nov 26 00:18:55.002: INFO: Container csi-provisioner ready: true, restart count 7 Nov 26 00:18:55.002: INFO: Container csi-resizer ready: true, restart count 7 Nov 26 00:18:55.002: INFO: Container csi-snapshotter ready: true, restart count 7 Nov 26 00:18:55.002: INFO: Container hostpath ready: true, restart count 7 Nov 26 00:18:55.002: INFO: Container liveness-probe ready: true, restart count 7 Nov 26 00:18:55.002: INFO: Container node-driver-registrar ready: true, restart count 7 Nov 26 00:18:55.002: INFO: pod-subpath-test-inlinevolume-c42v started at 2022-11-26 00:13:32 +0000 UTC (1+2 container statuses recorded) Nov 26 00:18:55.002: INFO: Init container init-volume-inlinevolume-c42v ready: true, restart count 1 Nov 26 00:18:55.002: INFO: Container test-container-subpath-inlinevolume-c42v ready: true, restart count 3 Nov 26 00:18:55.002: INFO: Container test-container-volume-inlinevolume-c42v ready: true, restart count 3 Nov 26 00:18:55.002: INFO: hostpath-symlink-prep-provisioning-1590 started at 2022-11-26 00:13:53 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container init-volume-provisioning-1590 ready: false, restart count 0 Nov 26 00:18:55.002: INFO: kube-dns-autoscaler-5f6455f985-7kdrd started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container autoscaler ready: false, restart count 7 Nov 26 00:18:55.002: INFO: netserver-1 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container webserver ready: true, restart count 5 Nov 26 00:18:55.002: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:13:15 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-attacher ready: true, restart count 2 Nov 26 00:18:55.002: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 00:18:55.002: INFO: Container csi-resizer ready: true, restart count 2 Nov 26 00:18:55.002: INFO: Container csi-snapshotter ready: true, restart count 2 Nov 26 00:18:55.002: INFO: Container hostpath ready: true, restart count 2 Nov 26 00:18:55.002: INFO: Container liveness-probe ready: true, restart count 2 Nov 26 00:18:55.002: INFO: Container node-driver-registrar ready: true, restart count 2 Nov 26 00:18:55.002: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-provisioner ready: true, restart count 6 Nov 26 00:18:55.002: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 00:18:55.002: INFO: Container mock ready: true, restart count 6 Nov 26 00:18:55.002: INFO: csi-mockplugin-attacher-0 started at 2022-11-25 23:58:36 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-attacher ready: true, restart count 3 Nov 
26 00:18:55.002: INFO: kube-proxy-bootstrap-e2e-minion-group-51gr started at 2022-11-25 23:56:31 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container kube-proxy ready: false, restart count 7 Nov 26 00:18:55.002: INFO: coredns-6d97d5ddb-6vx5m started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container coredns ready: false, restart count 7 Nov 26 00:18:55.002: INFO: konnectivity-agent-sg59x started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container konnectivity-agent ready: true, restart count 7 Nov 26 00:18:55.002: INFO: csi-mockplugin-0 started at 2022-11-25 23:58:36 +0000 UTC (0+3 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-provisioner ready: false, restart count 7 Nov 26 00:18:55.002: INFO: Container driver-registrar ready: false, restart count 7 Nov 26 00:18:55.002: INFO: Container mock ready: false, restart count 7 Nov 26 00:18:55.002: INFO: hostexec-bootstrap-e2e-minion-group-51gr-gncwt started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container agnhost-container ready: true, restart count 4 Nov 26 00:18:55.002: INFO: metadata-proxy-v0.1-9xnlr started at 2022-11-25 23:56:32 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:55.002: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:18:55.002: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:18:55.002: INFO: volume-snapshot-controller-0 started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container volume-snapshot-controller ready: false, restart count 6 Nov 26 00:18:55.002: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:29 +0000 UTC (0+4 container statuses recorded) Nov 26 00:18:55.002: INFO: Container busybox ready: true, restart count 6 Nov 26 00:18:55.002: INFO: Container csi-provisioner ready: true, restart count 5 Nov 26 00:18:55.002: INFO: Container driver-registrar ready: false, restart count 6 Nov 26 00:18:55.002: INFO: Container mock ready: false, restart count 6 Nov 26 00:18:55.002: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:00 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 00:18:55.002: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:18:55.002: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 00:18:55.002: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 00:18:55.002: INFO: Container hostpath ready: false, restart count 5 Nov 26 00:18:55.002: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 00:18:55.002: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 00:18:55.002: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:13:42 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:55.002: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 00:18:55.002: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 00:18:55.002: INFO: Container csi-resizer ready: true, restart count 4 Nov 26 00:18:55.002: INFO: Container csi-snapshotter ready: true, restart count 4 Nov 26 00:18:55.002: INFO: Container hostpath ready: true, restart count 4 Nov 26 00:18:55.002: INFO: Container liveness-probe ready: true, restart count 4 Nov 26 00:18:55.002: INFO: Container node-driver-registrar ready: true, restart 
count 4 Nov 26 00:18:55.002: INFO: pod-9dca799b-afc1-4920-8cdb-15687c00da67 started at 2022-11-26 00:13:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:55.002: INFO: ss-2 started at 2022-11-26 00:01:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.002: INFO: Container webserver ready: false, restart count 8 Nov 26 00:18:55.420: INFO: Latency metrics for node bootstrap-e2e-minion-group-51gr Nov 26 00:18:55.420: INFO: Logging node info for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:18:55.463: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-pjt7 5f55dd6b-a4d8-42f3-9e85-83e83c8dc9de 9608 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-pjt7 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-pjt7 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-6349":"bootstrap-e2e-minion-group-pjt7","csi-mock-csi-mock-volumes-8391":"bootstrap-e2e-minion-group-pjt7"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:13:49 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:16:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:18:55 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-pjt7,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no 
deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:18:55 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:18:55 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:18:55 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:18:55 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.105.124.11,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:f7ade85e43e2873500c8f33f09edf4a9,SystemUUID:f7ade85e-43e2-8735-00c8-f33f09edf4a9,BootID:07ab1c04-9bf6-4a67-bfa8-8d3160253b07,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 
registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9 kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6,DevicePath:,},},Config:nil,},} Nov 26 00:18:55.464: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:18:55.510: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-pjt7 Nov 26 00:18:55.590: INFO: pod-secrets-890a9a5b-57be-471c-8757-4aad820ed6d0 started at 2022-11-25 23:58:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:18:55.590: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-9qlmb started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container agnhost-container ready: false, restart count 1 Nov 26 00:18:55.590: INFO: test-hostpath-type-n5z6m started at 2022-11-26 00:04:48 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:55.590: INFO: pod-subpath-test-preprovisionedpv-jmbn started at 2022-11-26 00:13:46 +0000 UTC (1+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Init container init-volume-preprovisionedpv-jmbn ready: true, restart count 0 Nov 26 00:18:55.590: INFO: Container test-container-subpath-preprovisionedpv-jmbn ready: false, restart count 0 Nov 26 00:18:55.590: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:48 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:55.590: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 00:18:55.590: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 00:18:55.590: INFO: Container csi-resizer ready: true, restart count 4 Nov 26 00:18:55.590: INFO: Container csi-snapshotter ready: true, restart count 4 Nov 26 00:18:55.590: INFO: Container hostpath ready: true, restart count 4 Nov 26 00:18:55.590: INFO: Container liveness-probe ready: true, restart count 4 Nov 26 00:18:55.590: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 00:18:55.590: INFO: netserver-2 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container webserver ready: true, restart count 3 Nov 26 00:18:55.590: INFO: test-hostpath-type-9nghx started at 2022-11-26 00:04:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:55.590: INFO: forbid-27823693-tbmqx started at 2022-11-26 00:13:14 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container c ready: false, restart count 1 Nov 26 00:18:55.590: INFO: external-provisioner-v86lp started at 2022-11-26 00:13:16 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container nfs-provisioner ready: true, restart count 0 Nov 26 00:18:55.590: INFO: metrics-server-v0.5.2-867b8754b9-c8h52 started at 2022-11-25 23:57:03 +0000 UTC (0+2 container statuses 
recorded) Nov 26 00:18:55.590: INFO: Container metrics-server ready: false, restart count 7 Nov 26 00:18:55.590: INFO: Container metrics-server-nanny ready: false, restart count 8 Nov 26 00:18:55.590: INFO: pod-back-off-image started at 2022-11-26 00:00:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container back-off ready: false, restart count 8 Nov 26 00:18:55.590: INFO: pod-configmaps-607fb46f-a546-474e-99da-bccf05cace4e started at 2022-11-25 23:59:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:18:55.590: INFO: inclusterclient started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container inclusterclient ready: false, restart count 0 Nov 26 00:18:55.590: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-8b49h started at 2022-11-26 00:13:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:18:55.590: INFO: test-hostpath-type-nxcg6 started at 2022-11-26 00:13:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container host-path-sh-testing ready: true, restart count 0 Nov 26 00:18:55.590: INFO: ss-0 started at 2022-11-25 23:59:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container webserver ready: false, restart count 7 Nov 26 00:18:55.590: INFO: external-local-nodeport-dhpjs started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container netexec ready: true, restart count 5 Nov 26 00:18:55.590: INFO: pod-cc7edce3-35cc-4f45-bad6-a784001395c6 started at 2022-11-26 00:00:17 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:55.590: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-wcqzb started at 2022-11-26 00:13:16 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:18:55.590: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-9r9pc started at 2022-11-26 00:13:18 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container agnhost-container ready: false, restart count 4 Nov 26 00:18:55.590: INFO: external-provisioner-994ds started at 2022-11-26 00:13:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container nfs-provisioner ready: true, restart count 5 Nov 26 00:18:55.590: INFO: konnectivity-agent-ft6wq started at 2022-11-25 23:56:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container konnectivity-agent ready: false, restart count 7 Nov 26 00:18:55.590: INFO: netserver-2 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container webserver ready: true, restart count 7 Nov 26 00:18:55.590: INFO: test-hostpath-type-mpdjn started at 2022-11-26 00:13:16 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container host-path-testing ready: true, restart count 0 Nov 26 00:18:55.590: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-rkscb started at 2022-11-26 00:13:17 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:18:55.590: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-wfjcd started at 2022-11-26 00:02:40 +0000 UTC (0+1 container statuses recorded) Nov 26 
00:18:55.590: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:18:55.590: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded) Nov 26 00:18:55.590: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:18:55.590: INFO: Container driver-registrar ready: true, restart count 3 Nov 26 00:18:55.590: INFO: Container mock ready: true, restart count 3 Nov 26 00:18:55.590: INFO: pod-b48ba142-297b-4e60-b176-18111763e211 started at 2022-11-26 00:13:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:55.590: INFO: back-off-cap started at 2022-11-26 00:13:18 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.590: INFO: Container back-off-cap ready: false, restart count 5 Nov 26 00:18:55.590: INFO: lb-internal-lvsqm started at 2022-11-26 00:13:32 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container netexec ready: true, restart count 0 Nov 26 00:18:55.591: INFO: pod-subpath-test-preprovisionedpv-92c8 started at 2022-11-26 00:02:55 +0000 UTC (1+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Init container init-volume-preprovisionedpv-92c8 ready: true, restart count 0 Nov 26 00:18:55.591: INFO: Container test-container-subpath-preprovisionedpv-92c8 ready: false, restart count 0 Nov 26 00:18:55.591: INFO: test-hostpath-type-x56nj started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container host-path-sh-testing ready: false, restart count 0 Nov 26 00:18:55.591: INFO: var-expansion-5d5e62ea-c0e8-4fb3-be3d-1c786f246364 started at 2022-11-26 00:01:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container dapi-container ready: false, restart count 0 Nov 26 00:18:55.591: INFO: hostpath-injector started at 2022-11-25 23:58:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container hostpath-injector ready: false, restart count 0 Nov 26 00:18:55.591: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-f89gp started at 2022-11-26 00:13:22 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:18:55.591: INFO: external-provisioner-p6q4d started at 2022-11-26 00:04:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container nfs-provisioner ready: true, restart count 6 Nov 26 00:18:55.591: INFO: test-hostpath-type-qw7ws started at 2022-11-26 00:13:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container host-path-sh-testing ready: true, restart count 0 Nov 26 00:18:55.591: INFO: pod-configmaps-39454904-1ea1-4326-806f-d840f1ec6aab started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:18:55.591: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:55.591: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 00:18:55.591: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:18:55.591: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 00:18:55.591: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 00:18:55.591: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:18:55.591: INFO: Container liveness-probe ready: false, restart count 
6 Nov 26 00:18:55.591: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:18:55.591: INFO: test-hostpath-type-whtq5 started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:55.591: INFO: pod-subpath-test-inlinevolume-xjdn started at 2022-11-26 00:04:26 +0000 UTC (1+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Init container init-volume-inlinevolume-xjdn ready: true, restart count 0 Nov 26 00:18:55.591: INFO: Container test-container-subpath-inlinevolume-xjdn ready: false, restart count 0 Nov 26 00:18:55.591: INFO: pvc-volume-tester-wmxdq started at 2022-11-26 00:04:47 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:18:55.591: INFO: pod-configmaps-283e1a65-2a1e-4f8e-9383-eeee204154b1 started at 2022-11-25 23:58:30 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:18:55.591: INFO: test-hostpath-type-gjrf5 started at 2022-11-26 00:09:39 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:55.591: INFO: nfs-server started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container nfs-server ready: true, restart count 5 Nov 26 00:18:55.591: INFO: pod-21071e9a-af84-46e8-af96-a6a6561cb020 started at 2022-11-26 00:13:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:55.591: INFO: test-hostpath-type-lx6tk started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:55.591: INFO: test-hostpath-type-245dt started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:55.591: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 00:18:55.591: INFO: csi-mockplugin-0 started at 2022-11-26 00:08:03 +0000 UTC (0+4 container statuses recorded) Nov 26 00:18:55.591: INFO: Container busybox ready: true, restart count 4 Nov 26 00:18:55.591: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:18:55.591: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 00:18:55.591: INFO: Container mock ready: false, restart count 5 Nov 26 00:18:55.591: INFO: kube-proxy-bootstrap-e2e-minion-group-pjt7 started at 2022-11-25 23:56:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container kube-proxy ready: true, restart count 8 Nov 26 00:18:55.591: INFO: metadata-proxy-v0.1-9jgjn started at 2022-11-25 23:56:35 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:55.591: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:18:55.591: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:18:55.591: INFO: pod-961df2a5-8218-4399-a758-55d8b52b3564 started at 2022-11-26 00:13:33 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:55.591: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:56.024: INFO: 
Latency metrics for node bootstrap-e2e-minion-group-pjt7 [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 STEP: Destroying namespace "kubectl-8411" for this suite. 11/26/22 00:18:56.024
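The per-node pod listings above ("Logging pods the kubelet thinks is on node ...") come from the framework's namespace-dump helpers. As a rough sketch of the underlying query (not the framework's actual code; the kubeconfig path and node name are taken from this run, everything else is assumed), the same view can be produced with client-go by listing pods field-selected on spec.nodeName:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the same kubeconfig the e2e run logs at startup.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// List every pod scheduled to one node, across all namespaces; this is
	// the raw material for the "Container ... ready: ..., restart count ..."
	// lines in the dump above.
	pods, err := cs.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
		FieldSelector: "spec.nodeName=bootstrap-e2e-minion-group-pjt7",
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		for _, st := range p.Status.ContainerStatuses {
			fmt.Printf("%s/%s container %s ready: %v, restart count %d\n",
				p.Namespace, p.Name, st.Name, st.Ready, st.RestartCount)
		}
	}
}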
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\sclient\sSimple\spod\sshould\sreturn\scommand\sexit\scodes\s\[Slow\]\srunning\sa\sfailing\scommand\swithout\s\-\-restart\=Never\,\sbut\swith\s\-\-rm$'
test/e2e/framework/framework.go:241
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000da82d0)
	test/e2e/framework/framework.go:241 +0x96f
from junit_01.xml
[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:05:20.651 Nov 26 00:05:20.651: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename kubectl 11/26/22 00:05:20.654 Nov 26 00:05:20.694: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:22.733: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:24.733: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:26.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:28.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:30.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:32.733: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:34.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:36.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:38.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:40.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:42.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:44.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:46.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:48.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:50.734: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:50.774: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:50.774: INFO: Unexpected error: <*errors.errorString | 0xc00011bd30>: { s: "timed out waiting for the condition", } Nov 26 00:05:50.774: FAIL: timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000da82d0) 
test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 Nov 26 00:05:50.774: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:05:50.814 [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193
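The long run of "Unexpected error while creating namespace ... connection refused" lines above is a poll loop: the framework retries the namespace POST every couple of seconds until the apiserver answers or the deadline passes, at which point only the generic "timed out waiting for the condition" surfaces. A minimal sketch of that pattern, assuming client-go's wait helpers; the helper name, interval, and timeout below are illustrative, not the framework's own:

package main

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// createTestNamespace keeps retrying the create until it succeeds or times out.
func createTestNamespace(cs kubernetes.Interface, basename string) (*v1.Namespace, error) {
	var ns *v1.Namespace
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		created, err := cs.CoreV1().Namespaces().Create(context.TODO(),
			&v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: basename + "-"}},
			metav1.CreateOptions{})
		if err != nil {
			// e.g. dial tcp 34.168.120.117:443: connect: connection refused
			fmt.Printf("Unexpected error while creating namespace: %v\n", err)
			return false, nil // swallow the error so the poll retries
		}
		ns = created
		return true, nil
	})
	// On deadline the poll returns the generic "timed out waiting for the
	// condition" error seen in the FAIL line above, hiding the last cause.
	return ns, err
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	ns, err := createTestNamespace(kubernetes.NewForConfigOrDie(cfg), "kubectl")
	fmt.Println(ns, err)
}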
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\shandle\supdates\sto\sExternalTrafficPolicy\sfield$'
test/e2e/framework/framework.go:241
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc00128e1e0)
	test/e2e/framework/framework.go:241 +0x96f

There were additional failures detected after the initial failure:

[PANICKED] Test Panicked
In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260
runtime error: invalid memory address or nil pointer dereference
Full Stack Trace
k8s.io/kubernetes/test/e2e/network.glob..func20.2()
	test/e2e/network/loadbalancer.go:1262 +0x113
from junit_01.xml
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:20:10.5 Nov 26 00:20:10.500: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 00:20:10.502 Nov 26 00:20:10.541: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:12.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:14.581: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:16.581: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:18.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:20.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:22.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:24.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:26.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:28.581: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:30.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:32.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:34.581: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:36.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:38.581: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:40.582: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:40.621: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:20:40.621: INFO: Unexpected error: <*errors.errorString | 0xc000215d80>: { s: "timed out waiting for the condition", } Nov 26 00:20:40.621: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc00128e1e0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 00:20:40.621: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:20:40.661 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193
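The secondary [PANICKED] failure above is a knock-on effect rather than an independent bug: BeforeEach never completed (the apiserver was unreachable), so the AfterEach at loadbalancer.go:1262 dereferenced state that was never initialized. A minimal sketch of that failure mode and the guard that avoids it; testState, beforeEach, and afterEach are illustrative stand-ins, not the test's real identifiers:

package main

import "errors"

// testState stands in for whatever the real spec shares between its hooks.
type testState struct{ lbIP string }

var state *testState // assigned by beforeEach only on success

// assumed sentinel mirroring the "connection refused" failure in the log
var errDialRefused = errors.New("dial tcp 34.168.120.117:443: connect: connection refused")

func beforeEach() error {
	// The apiserver is unreachable, so we return before state is ever set.
	return errDialRefused
}

func afterEach() {
	// Without this guard, touching state here panics with the same
	// "invalid memory address or nil pointer dereference" as the trace above,
	// because Ginkgo still runs AfterEach when BeforeEach fails.
	if state == nil {
		return // setup never created anything; nothing to tear down
	}
	_ = state.lbIP // safe: only reached when setup completed
}

func main() {
	if err := beforeEach(); err != nil {
		afterEach()
		return
	}
	afterEach()
}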
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\sonly\starget\snodes\swith\sendpoints$'
test/e2e/framework/framework.go:241
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc00128e1e0)
	test/e2e/framework/framework.go:241 +0x96f

There were additional failures detected after the initial failure:

[PANICKED] Test Panicked
In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260
runtime error: invalid memory address or nil pointer dereference
Full Stack Trace
k8s.io/kubernetes/test/e2e/network.glob..func20.2()
	test/e2e/network/loadbalancer.go:1262 +0x113
from junit_01.xml
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:14:28.857 Nov 26 00:14:28.857: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 00:14:28.859 Nov 26 00:14:28.899: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:30.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:32.938: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:34.940: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:36.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:38.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:40.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:42.940: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:44.940: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:46.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:48.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:50.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:52.938: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:54.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:56.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:58.939: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:58.979: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:58.979: INFO: Unexpected error: <*errors.errorString | 0xc000215d80>: { s: "timed out waiting for the condition", } Nov 26 00:14:58.979: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc00128e1e0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 00:14:58.979: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:14:59.019 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193
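The ESIPP specs in this block exercise Services with ExternalTrafficPolicy=Local, where the cloud load balancer must deliver traffic only through nodes that host a ready endpoint, which is what the spec name "should only target nodes with endpoints" refers to. A sketch of the Service shape under test; the name, selector, and ports are illustrative, and only the type and traffic policy follow from the spec names:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// externalLocalService builds a LoadBalancer Service whose external traffic
// policy is Local, so client source IPs are preserved and only nodes with a
// local endpoint receive load-balancer traffic.
func externalLocalService() *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "external-local"},
		Spec: v1.ServiceSpec{
			Selector:              map[string]string{"app": "netexec"},
			Type:                  v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
			Ports: []v1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(8080),
			}},
		},
	}
}

func main() { _ = externalLocalService() }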
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\swork\sfor\stype\=LoadBalancer$'
test/e2e/framework/framework.go:241
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000d20000)
	test/e2e/framework/framework.go:241 +0x96f

There were additional failures detected after the initial failure:

[PANICKED] Test Panicked
In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260
runtime error: invalid memory address or nil pointer dereference
Full Stack Trace
k8s.io/kubernetes/test/e2e/network.glob..func20.2()
	test/e2e/network/loadbalancer.go:1262 +0x113
from junit_01.xml
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:04:51.191 Nov 26 00:04:51.191: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 00:04:51.193 Nov 26 00:04:51.232: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:53.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:55.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:57.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:59.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:01.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:03.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:05.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:07.271: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:09.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:11.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:13.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:15.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:17.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:19.272: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:21.273: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:21.312: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:21.312: INFO: Unexpected error: <*errors.errorString | 0xc0001c9a20>: { s: "timed out waiting for the condition", } Nov 26 00:05:21.312: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000d20000) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 00:05:21.313: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:05:21.352 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193
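The BeforeEach failure above is the framework's namespace-setup retry loop hitting an unreachable apiserver: it re-POSTs the namespace roughly every 2s and gives up with "timed out waiting for the condition" after about 30s. A minimal client-go sketch of that retry shape follows; the helper name and exact time budget are assumptions, not the actual test/e2e/framework code.

package e2esketch

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// createTestNamespace (hypothetical helper) retries namespace creation until
// the apiserver accepts it or the budget is exhausted. wait.PollImmediate
// returns "timed out waiting for the condition" on timeout, which is the
// FAIL message seen in the log above.
func createTestNamespace(ctx context.Context, c kubernetes.Interface, name string) (*v1.Namespace, error) {
	var ns *v1.Namespace
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		created, err := c.CoreV1().Namespaces().Create(ctx,
			&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}},
			metav1.CreateOptions{})
		if err != nil {
			// Transient errors such as "connection refused" are logged and
			// retried, matching the repeated INFO lines in the log.
			fmt.Printf("Unexpected error while creating namespace: %v\n", err)
			return false, nil
		}
		ns = created
		return true, nil
	})
	return ns, err
}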
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\swork\sfor\stype\=NodePort$'
test/e2e/framework/network/utils.go:866 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc001179260, {0x75c6f7c, 0x9}, 0xc003d82840) test/e2e/framework/network/utils.go:866 +0x1d0 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc001179260, 0x7f2faa67ad38?) test/e2e/framework/network/utils.go:763 +0x55 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc001179260, 0x3c?) test/e2e/framework/network/utils.go:778 +0x3e k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000b5a000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.4() test/e2e/network/loadbalancer.go:1332 +0x145 There were additional failures detected after the initial failure: [FAILED] Nov 26 00:04:51.819: failed to list events in namespace "esipp-5847": Get "https://34.168.120.117/api/v1/namespaces/esipp-5847/events": dial tcp 34.168.120.117:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 00:04:51.859: Couldn't delete ns: "esipp-5847": Delete "https://34.168.120.117/api/v1/namespaces/esipp-5847": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/esipp-5847", Err:(*net.OpError)(0xc0031243c0)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370 (from junit_01.xml)
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:02:59.247 Nov 26 00:02:59.247: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 00:02:59.249 Nov 26 00:02:59.288: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:01.328: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:03.328: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:05.328: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:07.329: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:09.328: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:11.329: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:13.328: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:15.328: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:17.328: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:19.328: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:21.329: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:23.328: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:25.329: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:27.329: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 00:04:24.742 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 00:04:24.828 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1250 [It] should work for type=NodePort test/e2e/network/loadbalancer.go:1314 STEP: creating a service esipp-5847/external-local-nodeport with type=NodePort and ExternalTrafficPolicy=Local 11/26/22 00:04:25.419 STEP: creating a pod to be part of the 
service external-local-nodeport 11/26/22 00:04:25.48 Nov 26 00:04:25.532: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 00:04:25.626: INFO: Found all 1 pods Nov 26 00:04:25.626: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [external-local-nodeport-dhpjs] Nov 26 00:04:25.626: INFO: Waiting up to 2m0s for pod "external-local-nodeport-dhpjs" in namespace "esipp-5847" to be "running and ready" Nov 26 00:04:25.719: INFO: Pod "external-local-nodeport-dhpjs": Phase="Pending", Reason="", readiness=false. Elapsed: 93.510463ms Nov 26 00:04:25.719: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-dhpjs' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:27.762: INFO: Pod "external-local-nodeport-dhpjs": Phase="Pending", Reason="", readiness=false. Elapsed: 2.135763692s Nov 26 00:04:27.762: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-dhpjs' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:30.320: INFO: Pod "external-local-nodeport-dhpjs": Phase="Pending", Reason="", readiness=false. Elapsed: 4.694646093s Nov 26 00:04:30.320: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-dhpjs' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:31.804: INFO: Pod "external-local-nodeport-dhpjs": Phase="Pending", Reason="", readiness=false. Elapsed: 6.178169137s Nov 26 00:04:31.804: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-dhpjs' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:33.889: INFO: Pod "external-local-nodeport-dhpjs": Phase="Pending", Reason="", readiness=false. Elapsed: 8.263376804s Nov 26 00:04:33.889: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-dhpjs' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:35.791: INFO: Pod "external-local-nodeport-dhpjs": Phase="Pending", Reason="", readiness=false. Elapsed: 10.165477335s Nov 26 00:04:35.791: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-dhpjs' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:37.854: INFO: Pod "external-local-nodeport-dhpjs": Phase="Pending", Reason="", readiness=false. Elapsed: 12.228345842s Nov 26 00:04:37.854: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-dhpjs' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:39.807: INFO: Pod "external-local-nodeport-dhpjs": Phase="Pending", Reason="", readiness=false. Elapsed: 14.181205719s Nov 26 00:04:39.807: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-dhpjs' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:41.796: INFO: Pod "external-local-nodeport-dhpjs": Phase="Pending", Reason="", readiness=false. Elapsed: 16.170281364s Nov 26 00:04:41.796: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-dhpjs' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:04:43.827: INFO: Pod "external-local-nodeport-dhpjs": Phase="Running", Reason="", readiness=true. 
Elapsed: 18.200771358s Nov 26 00:04:43.827: INFO: Pod "external-local-nodeport-dhpjs" satisfied condition "running and ready" Nov 26 00:04:43.827: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [external-local-nodeport-dhpjs] STEP: Performing setup for networking test in namespace esipp-5847 11/26/22 00:04:45.01 STEP: creating a selector 11/26/22 00:04:45.01 STEP: Creating the service pods in kubernetes 11/26/22 00:04:45.01 Nov 26 00:04:45.010: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable Nov 26 00:04:45.437: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "esipp-5847" to be "running and ready" Nov 26 00:04:45.526: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 88.468241ms Nov 26 00:04:45.526: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:04:47.785: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 2.347987108s Nov 26 00:04:47.785: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:04:49.633: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.195471762s Nov 26 00:04:49.633: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 00:04:51.566: INFO: Encountered non-retryable error while getting pod esipp-5847/netserver-0: Get "https://34.168.120.117/api/v1/namespaces/esipp-5847/pods/netserver-0": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:51.566: INFO: Unexpected error: <*fmt.wrapError | 0xc00173e040>: { msg: "error while waiting for pod esipp-5847/netserver-0 to be running and ready: Get \"https://34.168.120.117/api/v1/namespaces/esipp-5847/pods/netserver-0\": dial tcp 34.168.120.117:443: connect: connection refused", err: <*url.Error | 0xc003d82000>{ Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/esipp-5847/pods/netserver-0", Err: <*net.OpError | 0xc001264050>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc00378ad20>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc00173e000>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, }, } Nov 26 00:04:51.566: FAIL: error while waiting for pod esipp-5847/netserver-0 to be running and ready: Get "https://34.168.120.117/api/v1/namespaces/esipp-5847/pods/netserver-0": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc001179260, {0x75c6f7c, 0x9}, 0xc003d82840) test/e2e/framework/network/utils.go:866 +0x1d0 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc001179260, 0x7f2faa67ad38?) test/e2e/framework/network/utils.go:763 +0x55 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc001179260, 0x3c?) 
test/e2e/framework/network/utils.go:778 +0x3e k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000b5a000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.4() test/e2e/network/loadbalancer.go:1332 +0x145 Nov 26 00:04:51.606: INFO: Unexpected error: <*url.Error | 0xc003218090>: { Op: "Delete", URL: "https://34.168.120.117/api/v1/namespaces/esipp-5847/services/external-local-nodeport", Err: <*net.OpError | 0xc003108000>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc003d82420>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc00097a000>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:04:51.606: FAIL: Delete "https://34.168.120.117/api/v1/namespaces/esipp-5847/services/external-local-nodeport": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func20.4.1() test/e2e/network/loadbalancer.go:1323 +0xe7 panic({0x70eb7e0, 0xc00056e930}) /usr/local/go/src/runtime/panic.go:884 +0x212 k8s.io/kubernetes/test/e2e/framework.Fail({0xc0017ac000, 0xd0}, {0xc0007a77c0?, 0xc0017ac000?, 0xc0007a77e8?}) test/e2e/framework/log.go:61 +0x145 k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fa3f20, 0xc00173e040}, {0x0?, 0xc004d7ce90?, 0xc000b438e0?}) test/e2e/framework/expect.go:76 +0x267 k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...) test/e2e/framework/expect.go:43 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc001179260, {0x75c6f7c, 0x9}, 0xc003d82840) test/e2e/framework/network/utils.go:866 +0x1d0 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc001179260, 0x7f2faa67ad38?) test/e2e/framework/network/utils.go:763 +0x55 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc001179260, 0x3c?) test/e2e/framework/network/utils.go:778 +0x3e k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000b5a000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.4() test/e2e/network/loadbalancer.go:1332 +0x145 [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 00:04:51.607: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 Nov 26 00:04:51.647: INFO: Output of kubectl describe svc: Nov 26 00:04:51.647: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.120.117 --kubeconfig=/workspace/.kube/config --namespace=esipp-5847 describe svc --namespace=esipp-5847' Nov 26 00:04:51.779: INFO: rc: 1 Nov 26 00:04:51.780: INFO: [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:04:51.78 STEP: Collecting events from namespace "esipp-5847". 
11/26/22 00:04:51.78 Nov 26 00:04:51.819: INFO: Unexpected error: failed to list events in namespace "esipp-5847": <*url.Error | 0xc00378aed0>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/esipp-5847/events", Err: <*net.OpError | 0xc00464a2d0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc003bac540>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc0030882a0>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:04:51.819: FAIL: failed to list events in namespace "esipp-5847": Get "https://34.168.120.117/api/v1/namespaces/esipp-5847/events": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc0018045c0, {0xc004d7ce90, 0xa}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc003050680}, {0xc004d7ce90, 0xa}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc001804650?, {0xc004d7ce90?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc000b5a000) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc00094f990?, 0xc0046a2f08?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc001b573b0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc00094f990?, 0x0?}, {0xae73300?, 0x0?, 0x0?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "esipp-5847" for this suite. 11/26/22 00:04:51.82 Nov 26 00:04:51.859: FAIL: Couldn't delete ns: "esipp-5847": Delete "https://34.168.120.117/api/v1/namespaces/esipp-5847": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/esipp-5847", Err:(*net.OpError)(0xc0031243c0)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc000b5a000) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc00094f8a0?, 0xc0046a1fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc00094f8a0?, 0x0?}, {0xae73300?, 0x5?, 0xc00007e780?}) /usr/local/go/src/reflect/value.go:368 +0xbc
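The netserver-0 wait in the entry above is the standard "running and ready" poll: fetch the pod every couple of seconds, require Phase=Running plus the Ready condition, and treat an error from the Get itself (here, the apiserver going unreachable mid-poll) as non-retryable. A minimal client-go sketch under those assumptions; the helper name and 2s interval are hypothetical, not the framework's actual pod-wait helper.

package e2esketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodRunningAndReady (hypothetical) polls until the pod is Running
// with Ready=True, or the timeout elapses.
func waitForPodRunningAndReady(ctx context.Context, c kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Returning the error aborts the poll; this is why the run above
			// failed immediately on "connection refused" rather than retrying.
			return false, err
		}
		if pod.Status.Phase != v1.PodRunning {
			return false, nil // still Pending: keep polling
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodReady {
				return cond.Status == v1.ConditionTrue, nil
			}
		}
		return false, nil
	})
}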
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\swork\sfrom\spods$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000bb6000) test/e2e/framework/framework.go:241 +0x96f There were additional failures detected after the initial failure: [PANICKED] Test Panicked In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260 runtime error: invalid memory address or nil pointer dereference Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func20.2() test/e2e/network/loadbalancer.go:1262 +0x113 (from junit_01.xml)
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:14:02.122 Nov 26 00:14:02.122: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 00:14:02.124 Nov 26 00:14:02.163: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:04.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:06.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:08.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:10.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:12.204: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:14.204: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:16.204: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:18.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:20.206: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:22.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:24.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:26.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:28.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:30.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:32.203: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:32.243: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:32.243: INFO: Unexpected error: <*errors.errorString | 0xc000209d20>: { s: "timed out waiting for the condition", } Nov 26 00:14:32.243: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000bb6000) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 00:14:32.243: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:14:32.283 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193
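The secondary [PANICKED] failures in these ESIPP entries share a pattern: BeforeEach fails before the spec's shared state is initialized, then the registered AfterEach dereferences that state and hits a nil pointer. One plausible Ginkgo v2 sketch of that mechanism, with hypothetical names; this is an illustration of the failure mode, not the actual code at test/e2e/network/loadbalancer.go:1262.

package e2esketch

import (
	"github.com/onsi/ginkgo/v2"
	"k8s.io/client-go/kubernetes"
)

var _ = ginkgo.Describe("[sig-network] sketch", func() {
	var cs kubernetes.Interface // nil until BeforeEach succeeds

	ginkgo.BeforeEach(func() {
		// If namespace setup times out here, the spec fails and cs stays nil.
		ginkgo.Fail("timed out waiting for the condition")
		// cs = mustBuildClient() // hypothetical; never reached
	})

	ginkgo.AfterEach(func() {
		// AfterEach still runs after a BeforeEach failure, and calling a
		// method on the nil interface panics: "runtime error: invalid
		// memory address or nil pointer dereference".
		_ = cs.CoreV1()
	})
})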
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\sbe\sable\sto\schange\sthe\stype\sand\sports\sof\sa\sTCP\sservice\s\[Slow\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc001327b30) test/e2e/framework/framework.go:241 +0x96f There were additional failures detected after the initial failure: [PANICKED] Test Panicked In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260 runtime error: invalid memory address or nil pointer dereference Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func19.2() test/e2e/network/loadbalancer.go:73 +0x113 (from junit_01.xml)
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:05:21.011 Nov 26 00:05:21.011: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/26/22 00:05:21.013 Nov 26 00:05:21.053: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:23.092: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:25.092: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:27.094: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:29.093: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:31.092: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:33.092: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:35.093: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:37.093: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:39.092: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:41.093: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:43.093: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:45.093: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:47.093: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:49.092: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:51.092: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:51.132: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:51.132: INFO: Unexpected error: <*errors.errorString | 0xc0001c99e0>: { s: "timed out waiting for the condition", } Nov 26 00:05:51.132: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc001327b30) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers test/e2e/framework/node/init/init.go:32 Nov 26 00:05:51.132: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:71 [DeferCleanup (Each)] [sig-network] LoadBalancers dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:05:51.173 [DeferCleanup (Each)] [sig-network] LoadBalancers tear down framework | framework.go:193
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\sbe\sable\sto\schange\sthe\stype\sand\sports\sof\sa\sUDP\sservice\s\[Slow\]$'
test/e2e/network/loadbalancer.go:448 k8s.io/kubernetes/test/e2e/network.glob..func19.4() test/e2e/network/loadbalancer.go:448 +0x123d (from junit_01.xml)
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:04:25.101 Nov 26 00:04:25.101: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/26/22 00:04:25.102 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 00:04:25.328 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 00:04:25.43 [BeforeEach] [sig-network] LoadBalancers test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:65 [It] should be able to change the type and ports of a UDP service [Slow] test/e2e/network/loadbalancer.go:287 Nov 26 00:04:26.813: INFO: namespace for TCP test: loadbalancers-6737 STEP: creating a UDP service mutability-test with type=ClusterIP in namespace loadbalancers-6737 11/26/22 00:04:26.865 Nov 26 00:04:26.948: INFO: service port UDP: 80 STEP: creating a pod to be part of the UDP service mutability-test 11/26/22 00:04:26.948 Nov 26 00:04:27.000: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 00:04:27.071: INFO: Found all 1 pods Nov 26 00:04:27.071: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [mutability-test-cj5nq] Nov 26 00:04:27.071: INFO: Waiting up to 2m0s for pod "mutability-test-cj5nq" in namespace "loadbalancers-6737" to be "running and ready" Nov 26 00:04:27.119: INFO: Pod "mutability-test-cj5nq": Phase="Pending", Reason="", readiness=false. Elapsed: 47.790052ms Nov 26 00:04:27.119: INFO: Error evaluating pod condition running and ready: want pod 'mutability-test-cj5nq' on 'bootstrap-e2e-minion-group-4434' to be 'Running' but was 'Pending' Nov 26 00:04:29.359: INFO: Pod "mutability-test-cj5nq": Phase="Running", Reason="", readiness=false. Elapsed: 2.288277126s Nov 26 00:04:29.360: INFO: Error evaluating pod condition running and ready: pod 'mutability-test-cj5nq' on 'bootstrap-e2e-minion-group-4434' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:04:27 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:04:27 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:04:27 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:04:27 +0000 UTC }] Nov 26 00:04:31.312: INFO: Pod "mutability-test-cj5nq": Phase="Running", Reason="", readiness=true. Elapsed: 4.240538073s Nov 26 00:04:31.312: INFO: Pod "mutability-test-cj5nq" satisfied condition "running and ready" Nov 26 00:04:31.312: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [mutability-test-cj5nq] STEP: changing the UDP service to type=NodePort 11/26/22 00:04:31.312 Nov 26 00:04:31.749: INFO: UDP node port: 32498 STEP: hitting the UDP service's NodePort 11/26/22 00:04:31.749 Nov 26 00:04:31.749: INFO: Poking udp://34.168.8.98:32498 Nov 26 00:04:31.789: INFO: Poke("udp://34.168.8.98:32498"): read udp 10.60.181.93:37775->34.168.8.98:32498: read: connection refused Nov 26 00:04:33.790: INFO: Poking udp://34.168.8.98:32498 Nov 26 00:04:33.829: INFO: Poke("udp://34.168.8.98:32498"): read udp 10.60.181.93:52491->34.168.8.98:32498: read: connection refused Nov 26 00:04:35.790: INFO: Poking udp://34.168.8.98:32498 Nov 26 00:04:35.834: INFO: Poke("udp://34.168.8.98:32498"): success STEP: creating a static load balancer IP 11/26/22 00:04:35.834 Nov 26 00:04:37.850: INFO: Allocated static load balancer IP: 35.230.97.25 STEP: changing the UDP service to type=LoadBalancer 11/26/22 00:04:37.85 STEP: demoting the static IP to ephemeral 11/26/22 00:04:38.022 STEP: waiting for the UDP service to have a load balancer 11/26/22 00:04:39.756 Nov 26 00:04:39.756: INFO: Waiting up to 15m0s for service "mutability-test" to have a LoadBalancer Nov 26 00:04:51.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:53.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:55.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:57.859: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:04:59.857: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:01.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:03.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:05.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:07.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:09.858: INFO: Retrying .... 
error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:11.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:13.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:15.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:17.859: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:19.859: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:21.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:23.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:25.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:27.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:29.857: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:31.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:33.859: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:35.859: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:37.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:39.858: INFO: Retrying .... 
error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:41.859: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:43.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:45.857: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:47.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:49.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:05:51.858: INFO: Retrying .... error trying to get Service mutability-test: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-6737/services/mutability-test": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:08:33.887: INFO: UDP load balancer: 34.168.110.123 STEP: hitting the UDP service's NodePort 11/26/22 00:08:33.887 Nov 26 00:08:33.887: INFO: Poking udp://34.168.8.98:32498 Nov 26 00:08:33.928: INFO: Poke("udp://34.168.8.98:32498"): success STEP: hitting the UDP service's LoadBalancer 11/26/22 00:08:33.928 Nov 26 00:08:33.928: INFO: Poking udp://34.168.110.123:80 Nov 26 00:08:36.929: INFO: Poke("udp://34.168.110.123:80"): read udp 10.60.181.93:45912->34.168.110.123:80: i/o timeout Nov 26 00:08:38.930: INFO: Poking udp://34.168.110.123:80 Nov 26 00:08:38.971: INFO: Poke("udp://34.168.110.123:80"): success STEP: changing the UDP service's NodePort 11/26/22 00:08:38.971 Nov 26 00:08:39.362: INFO: UDP node port: 32499 STEP: hitting the UDP service's new NodePort 11/26/22 00:08:39.362 Nov 26 00:08:39.362: INFO: Poking udp://34.168.8.98:32499 Nov 26 00:08:39.401: INFO: Poke("udp://34.168.8.98:32499"): success STEP: checking the old UDP NodePort is closed 11/26/22 00:08:39.402 Nov 26 00:08:39.402: INFO: Poking udp://34.168.8.98:32498 Nov 26 00:08:39.442: INFO: Poke("udp://34.168.8.98:32498"): read udp 10.60.181.93:52112->34.168.8.98:32498: read: connection refused STEP: hitting the UDP service's LoadBalancer 11/26/22 00:08:39.442 Nov 26 00:08:39.442: INFO: Poking udp://34.168.110.123:80 Nov 26 00:08:39.481: INFO: Poke("udp://34.168.110.123:80"): success STEP: changing the UDP service's port 11/26/22 00:08:39.482 Nov 26 00:08:39.728: INFO: service port UDP: 81 STEP: hitting the UDP service's NodePort 11/26/22 00:08:39.728 Nov 26 00:08:39.729: INFO: Poking udp://34.168.8.98:32499 Nov 26 00:08:39.768: INFO: Poke("udp://34.168.8.98:32499"): success STEP: hitting the UDP service's LoadBalancer 11/26/22 00:08:39.768 Nov 26 00:08:39.768: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:39.807: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:35559->34.168.110.123:81: read: connection refused Nov 26 
00:08:41.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:41.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:33013->34.168.110.123:81: read: connection refused Nov 26 00:08:43.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:43.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:48502->34.168.110.123:81: read: connection refused Nov 26 00:08:45.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:45.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:59629->34.168.110.123:81: read: connection refused Nov 26 00:08:47.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:47.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:44071->34.168.110.123:81: read: connection refused Nov 26 00:08:49.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:49.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:60301->34.168.110.123:81: read: connection refused Nov 26 00:08:51.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:51.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:50813->34.168.110.123:81: read: connection refused Nov 26 00:08:53.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:53.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:51832->34.168.110.123:81: read: connection refused Nov 26 00:08:55.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:55.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:56780->34.168.110.123:81: read: connection refused Nov 26 00:08:57.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:57.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:54625->34.168.110.123:81: read: connection refused Nov 26 00:08:59.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:08:59.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:46497->34.168.110.123:81: read: connection refused Nov 26 00:09:01.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:01.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:34389->34.168.110.123:81: read: connection refused Nov 26 00:09:03.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:03.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:42809->34.168.110.123:81: read: connection refused Nov 26 00:09:05.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:05.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:41912->34.168.110.123:81: read: connection refused Nov 26 00:09:07.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:07.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:37142->34.168.110.123:81: read: connection refused Nov 26 00:09:09.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:09.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:42224->34.168.110.123:81: read: connection refused Nov 26 00:09:11.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:11.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:44625->34.168.110.123:81: read: connection refused Nov 26 00:09:13.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:13.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:52672->34.168.110.123:81: read: connection refused Nov 26 00:09:15.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:15.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:40255->34.168.110.123:81: read: connection refused Nov 26 00:09:17.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:17.847: INFO: Poke("udp://34.168.110.123:81"): read 
udp 10.60.181.93:46456->34.168.110.123:81: read: connection refused Nov 26 00:09:19.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:19.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:60826->34.168.110.123:81: read: connection refused Nov 26 00:09:21.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:21.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:38267->34.168.110.123:81: read: connection refused Nov 26 00:09:23.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:23.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:44338->34.168.110.123:81: read: connection refused Nov 26 00:09:25.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:25.847: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:35934->34.168.110.123:81: read: connection refused ------------------------------ Progress Report for Ginkgo Process #13 Automatically polling progress: [sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow] (Spec Runtime: 5m1.663s) test/e2e/network/loadbalancer.go:287 In [It] (Node Runtime: 5m0.001s) test/e2e/network/loadbalancer.go:287 At [By Step] hitting the UDP service's LoadBalancer (Step Runtime: 46.995s) test/e2e/network/loadbalancer.go:443 Spec Goroutine goroutine 1062 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0010e4828, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x60?, 0x2fd9d05?, 0x10?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0001fe680?, 0xc0017e9cb0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x65cbc00?, 0xc000f88cf0?, 0x754e980?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.testReachableUDP({0xc0022ba180, 0xe}, 0x51, 0x0?) 
test/e2e/network/service.go:603 > k8s.io/kubernetes/test/e2e/network.glob..func19.4() test/e2e/network/loadbalancer.go:444 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000951380}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 00:09:27.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:27.846: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:57372->34.168.110.123:81: read: connection refused Nov 26 00:09:29.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:32.808: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:37336->34.168.110.123:81: i/o timeout Nov 26 00:09:33.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:36.809: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:41359->34.168.110.123:81: i/o timeout Nov 26 00:09:37.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:40.809: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:33051->34.168.110.123:81: i/o timeout Nov 26 00:09:41.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:44.808: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:60219->34.168.110.123:81: i/o timeout Nov 26 00:09:45.808: INFO: Poking udp://34.168.110.123:81 ------------------------------ Progress Report for Ginkgo Process #13 Automatically polling progress: [sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow] (Spec Runtime: 5m21.665s) test/e2e/network/loadbalancer.go:287 In [It] (Node Runtime: 5m20.003s) test/e2e/network/loadbalancer.go:287 At [By Step] hitting the UDP service's LoadBalancer (Step Runtime: 1m6.997s) test/e2e/network/loadbalancer.go:443 Spec Goroutine goroutine 1062 [IO wait] internal/poll.runtime_pollWait(0x7f6a98201860, 0x72) /usr/local/go/src/runtime/netpoll.go:305 internal/poll.(*pollDesc).wait(0xc001543980?, 0xc004b304b0?, 0x0) /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 internal/poll.(*pollDesc).waitRead(...) /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 internal/poll.(*FD).Read(0xc001543980, {0xc004b304b0, 0x6, 0x6}) /usr/local/go/src/internal/poll/fd_unix.go:167 net.(*netFD).Read(0xc001543980, {0xc004b304b0?, 0xc0017e9818?, 0x2671252?}) /usr/local/go/src/net/fd_posix.go:55 net.(*conn).Read(0xc0036641a8, {0xc004b304b0?, 0xae40400?, 0xae40400?}) /usr/local/go/src/net/net.go:183 > k8s.io/kubernetes/test/e2e/network.pokeUDP({0xc0022ba180, 0xe}, 0x51, {0x75ca3e8, 0xa}, 0xc0017e9a70) test/e2e/network/service.go:562 > k8s.io/kubernetes/test/e2e/network.testReachableUDP.func1() test/e2e/network/service.go:593 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0x7fadb00?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0010e4828, 0x2fdb16a?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x60?, 0x2fd9d05?, 0x10?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0001fe680?, 0xc0017e9cb0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x65cbc00?, 0xc000f88cf0?, 0x754e980?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.testReachableUDP({0xc0022ba180, 0xe}, 0x51, 0x0?) test/e2e/network/service.go:603 > k8s.io/kubernetes/test/e2e/network.glob..func19.4() test/e2e/network/loadbalancer.go:444 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000951380}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 00:09:48.808: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:47488->34.168.110.123:81: i/o timeout Nov 26 00:09:49.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:52.809: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:38938->34.168.110.123:81: i/o timeout Nov 26 00:09:53.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:09:56.808: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:59036->34.168.110.123:81: i/o timeout Nov 26 00:09:57.807: INFO: Poking udp://34.168.110.123:81 Nov 26 00:10:00.809: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:44077->34.168.110.123:81: i/o timeout Nov 26 00:10:01.808: INFO: Poking udp://34.168.110.123:81 Nov 26 00:10:04.809: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:54718->34.168.110.123:81: i/o timeout Nov 26 00:10:05.808: INFO: Poking udp://34.168.110.123:81 ------------------------------ Progress Report for Ginkgo Process #13 Automatically polling progress: [sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow] (Spec Runtime: 5m41.667s) test/e2e/network/loadbalancer.go:287 In [It] (Node Runtime: 5m40.006s) test/e2e/network/loadbalancer.go:287 At [By Step] hitting the UDP service's LoadBalancer (Step Runtime: 1m27s) test/e2e/network/loadbalancer.go:443 Spec Goroutine goroutine 1062 [IO wait] internal/poll.runtime_pollWait(0x7f6a982013b0, 0x72) /usr/local/go/src/runtime/netpoll.go:305 internal/poll.(*pollDesc).wait(0xc0005d5780?, 0xc000f50ba0?, 0x0) /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 internal/poll.(*pollDesc).waitRead(...) 
/usr/local/go/src/internal/poll/fd_poll_runtime.go:89
internal/poll.(*FD).Read(0xc0005d5780, {0xc000f50ba0, 0x6, 0x6})
  /usr/local/go/src/internal/poll/fd_unix.go:167
net.(*netFD).Read(0xc0005d5780, {0xc000f50ba0?, 0xc0017e9818?, 0x2671252?})
  /usr/local/go/src/net/fd_posix.go:55
net.(*conn).Read(0xc000886c78, {0xc000f50ba0?, 0xae40400?, 0xae40400?})
  /usr/local/go/src/net/net.go:183
> k8s.io/kubernetes/test/e2e/network.pokeUDP({0xc0022ba180, 0xe}, 0x51, {0x75ca3e8, 0xa}, 0xc0017e9a70)
  test/e2e/network/service.go:562
> k8s.io/kubernetes/test/e2e/network.testReachableUDP.func1()
  test/e2e/network/service.go:593
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0x7fadb00?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0010e4828, 0x2fdb16a?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x60?, 0x2fd9d05?, 0x10?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0001fe680?, 0xc0017e9cb0?, 0x262a967?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x65cbc00?, 0xc000f88cf0?, 0x754e980?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
> k8s.io/kubernetes/test/e2e/network.testReachableUDP({0xc0022ba180, 0xe}, 0x51, 0x0?)
  test/e2e/network/service.go:603
> k8s.io/kubernetes/test/e2e/network.glob..func19.4()
  test/e2e/network/loadbalancer.go:444
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000951380})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 00:10:08.808: INFO: Poke("udp://34.168.110.123:81"): read udp 10.60.181.93:39252->34.168.110.123:81: i/o timeout
Nov 26 00:10:09.808: INFO: Poking udp://34.168.110.123:81
Nov 26 00:10:09.848: INFO: Poke("udp://34.168.110.123:81"): success
STEP: Scaling the pods to 0 11/26/22 00:10:09.848
Nov 26 00:10:16.840: INFO: Waiting up to 2m0s for 0 pods to be created
Nov 26 00:10:16.885: INFO: Found 1/0 pods - will retry
Nov 26 00:10:18.929: INFO: Found 1/0 pods - will retry
Nov 26 00:10:20.973: INFO: Found 1/0 pods - will retry
Nov 26 00:10:23.015: INFO: Found 1/0 pods - will retry
Nov 26 00:10:25.061: INFO: Found 1/0 pods - will retry
------------------------------
Progress Report for Ginkgo Process #13
Automatically polling progress:
  [sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow] (Spec Runtime: 6m1.67s)
    test/e2e/network/loadbalancer.go:287
  In [It] (Node Runtime: 6m0.008s)
    test/e2e/network/loadbalancer.go:287
  At [By Step] Scaling the pods to 0 (Step Runtime: 16.922s)
    test/e2e/network/loadbalancer.go:446

Spec Goroutine
goroutine 1062 [sleep]
  time.Sleep(0x77359400)
    /usr/local/go/src/runtime/time.go:195
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForPodsCreated(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:803
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).Scale(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:773
  > k8s.io/kubernetes/test/e2e/network.glob..func19.4()
    test/e2e/network/loadbalancer.go:447
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000951380})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 00:10:27.104: INFO: Found 1/0 pods - will retry
Nov 26 00:10:29.146: INFO: Found 1/0 pods - will retry
Nov 26 00:10:31.192: INFO: Found 1/0 pods - will retry
Nov 26 00:10:33.234: INFO: Found 1/0 pods - will retry
Nov 26 00:10:35.278: INFO: Found 1/0 pods - will retry
Nov 26 00:10:37.322: INFO: Found 1/0 pods - will retry
Nov 26 00:10:39.365: INFO: Found 1/0 pods - will retry
Nov 26 00:10:41.410: INFO: Found 1/0 pods - will retry
Nov 26 00:10:43.453: INFO: Found 1/0 pods - will retry
Nov 26 00:10:45.497: INFO: Found 1/0 pods - will retry
------------------------------
Progress Report for Ginkgo Process #13
Automatically polling progress:
  [sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow] (Spec Runtime: 6m21.672s)
    test/e2e/network/loadbalancer.go:287
  In [It] (Node Runtime: 6m20.01s)
    test/e2e/network/loadbalancer.go:287
  At [By Step] Scaling the pods to 0 (Step Runtime: 36.924s)
    test/e2e/network/loadbalancer.go:446

Spec Goroutine
goroutine 1062 [sleep]
  time.Sleep(0x77359400)
    /usr/local/go/src/runtime/time.go:195
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForPodsCreated(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:803
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).Scale(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:773
  > k8s.io/kubernetes/test/e2e/network.glob..func19.4()
    test/e2e/network/loadbalancer.go:447
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000951380})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 00:10:47.541: INFO: Found 1/0 pods - will retry
Nov 26 00:10:49.587: INFO: Found 1/0 pods - will retry
Nov 26 00:10:51.635: INFO: Found 1/0 pods - will retry
Nov 26 00:10:53.679: INFO: Found 1/0 pods - will retry
Nov 26 00:10:55.723: INFO: Found 1/0 pods - will retry
Nov 26 00:10:57.767: INFO: Found 1/0 pods - will retry
Nov 26 00:10:59.811: INFO: Found 1/0 pods - will retry
Nov 26 00:11:01.857: INFO: Found 1/0 pods - will retry
Nov 26 00:11:03.907: INFO: Found 1/0 pods - will retry
Nov 26 00:11:05.950: INFO: Found 1/0 pods - will retry
------------------------------
Progress Report for Ginkgo Process #13
Automatically polling progress:
  [sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow] (Spec Runtime: 6m41.674s)
    test/e2e/network/loadbalancer.go:287
  In [It] (Node Runtime: 6m40.012s)
    test/e2e/network/loadbalancer.go:287
  At [By Step] Scaling the pods to 0 (Step Runtime: 56.926s)
    test/e2e/network/loadbalancer.go:446

Spec Goroutine
goroutine 1062 [sleep]
  time.Sleep(0x77359400)
    /usr/local/go/src/runtime/time.go:195
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForPodsCreated(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:803
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).Scale(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:773
  > k8s.io/kubernetes/test/e2e/network.glob..func19.4()
    test/e2e/network/loadbalancer.go:447
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000951380})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 00:11:07.993: INFO: Found 1/0 pods - will retry
Nov 26 00:11:10.036: INFO: Found 1/0 pods - will retry
Nov 26 00:11:12.080: INFO: Found 1/0 pods - will retry
Nov 26 00:11:14.123: INFO: Found 1/0 pods - will retry
Nov 26 00:11:16.166: INFO: Found 1/0 pods - will retry
Nov 26 00:11:18.212: INFO: Found 1/0 pods - will retry
Nov 26 00:11:20.258: INFO: Found 1/0 pods - will retry
Nov 26 00:11:22.302: INFO: Found 1/0 pods - will retry
Nov 26 00:11:24.346: INFO: Found 1/0 pods - will retry
Nov 26 00:11:26.390: INFO: Found 1/0 pods - will retry
------------------------------
Progress Report for Ginkgo Process #13
Automatically polling progress:
  [sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow] (Spec Runtime: 7m1.677s)
    test/e2e/network/loadbalancer.go:287
  In [It] (Node Runtime: 7m0.015s)
    test/e2e/network/loadbalancer.go:287
  At [By Step] Scaling the pods to 0 (Step Runtime: 1m16.929s)
    test/e2e/network/loadbalancer.go:446

Spec Goroutine
goroutine 1062 [sleep]
  time.Sleep(0x77359400)
    /usr/local/go/src/runtime/time.go:195
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForPodsCreated(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:803
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).Scale(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:773
  > k8s.io/kubernetes/test/e2e/network.glob..func19.4()
    test/e2e/network/loadbalancer.go:447
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000951380})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 00:11:28.437: INFO: Found 1/0 pods - will retry
Nov 26 00:11:30.481: INFO: Found 1/0 pods - will retry
Nov 26 00:11:32.529: INFO: Found 1/0 pods - will retry
Nov 26 00:11:34.573: INFO: Found 1/0 pods - will retry
Nov 26 00:11:36.616: INFO: Found 1/0 pods - will retry
Nov 26 00:11:38.662: INFO: Found 1/0 pods - will retry
Nov 26 00:11:40.708: INFO: Found 1/0 pods - will retry
Nov 26 00:11:42.752: INFO: Found 1/0 pods - will retry
Nov 26 00:11:44.795: INFO: Found 1/0 pods - will retry
------------------------------
Progress Report for Ginkgo Process #13
Automatically polling progress:
  [sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow] (Spec Runtime: 7m21.679s)
    test/e2e/network/loadbalancer.go:287
  In [It] (Node Runtime: 7m20.017s)
    test/e2e/network/loadbalancer.go:287
  At [By Step] Scaling the pods to 0 (Step Runtime: 1m36.931s)
    test/e2e/network/loadbalancer.go:446

Spec Goroutine
goroutine 1062 [sleep]
  time.Sleep(0x77359400)
    /usr/local/go/src/runtime/time.go:195
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForPodsCreated(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:803
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).Scale(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:773
  > k8s.io/kubernetes/test/e2e/network.glob..func19.4()
    test/e2e/network/loadbalancer.go:447
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000951380})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 00:11:46.838: INFO: Found 1/0 pods - will retry
Nov 26 00:11:48.881: INFO: Found 1/0 pods - will retry
Nov 26 00:11:50.925: INFO: Found 1/0 pods - will retry
Nov 26 00:11:52.977: INFO: Found 1/0 pods - will retry
Nov 26 00:11:55.020: INFO: Found 1/0 pods - will retry
Nov 26 00:11:57.063: INFO: Found 1/0 pods - will retry
Nov 26 00:11:59.108: INFO: Found 1/0 pods - will retry
Nov 26 00:12:01.152: INFO: Found 1/0 pods - will retry
Nov 26 00:12:03.196: INFO: Found 1/0 pods - will retry
Nov 26 00:12:05.239: INFO: Found 1/0 pods - will retry
------------------------------
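Note on the repeated progress reports above: the spec goroutine is parked in time.Sleep(0x77359400) -- exactly 2,000,000,000 ns, i.e. a 2s sleep -- inside TestJig.waitForPodsCreated, which matches the 2-second cadence of the "Found 1/0 pods - will retry" lines. The earlier It-goroutine stack shows the same retry idiom via the apimachinery wait helpers: pokeUDP is driven through wait.PollImmediate (wait.go:514 in the stack) until the load balancer answers or the timeout expires, and each per-attempt outcome is what appears above as a Poke("udp://..."): line. A minimal, self-contained sketch of that poll pattern; the probe body, payload, interval, and timeout here are illustrative stand-ins, not the actual e2e helper code:

    package main

    import (
        "fmt"
        "net"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // udpReachable sends one datagram and waits briefly for any reply.
    // Illustrative stand-in for the pokeUDP probe in the stack above; a
    // read that hits the deadline surfaces as the "i/o timeout" seen in the log.
    func udpReachable(host string, port int) bool {
        conn, err := net.DialTimeout("udp", fmt.Sprintf("%s:%d", host, port), 3*time.Second)
        if err != nil {
            return false
        }
        defer conn.Close()
        if _, err := conn.Write([]byte("hello")); err != nil {
            return false
        }
        _ = conn.SetReadDeadline(time.Now().Add(3 * time.Second))
        buf := make([]byte, 128)
        _, err = conn.Read(buf)
        return err == nil
    }

    func main() {
        // wait.PollImmediate runs the condition once right away, then every
        // interval until it returns true, returns an error, or the timeout
        // elapses -- the same helper visible in the goroutine stack.
        err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
            return udpReachable("34.168.110.123", 81), nil
        })
        if err != nil {
            // wait.ErrWaitTimeout when the endpoint never became reachable.
            fmt.Println("gave up:", err)
        }
    }

The alternation between "i/o timeout" and "success" Poke lines earlier in the log is exactly this loop observing the load balancer before it has fully converged; the FAIL that follows below is the timeout branch of the analogous pod-count wait in waitForPodsCreated.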
Progress Report for Ginkgo Process #13
Automatically polling progress:
  [sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow] (Spec Runtime: 7m41.681s)
    test/e2e/network/loadbalancer.go:287
  In [It] (Node Runtime: 7m40.02s)
    test/e2e/network/loadbalancer.go:287
  At [By Step] Scaling the pods to 0 (Step Runtime: 1m56.934s)
    test/e2e/network/loadbalancer.go:446

Spec Goroutine
goroutine 1062 [sleep]
  time.Sleep(0x77359400)
    /usr/local/go/src/runtime/time.go:195
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForPodsCreated(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:803
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).Scale(0xc004b59c20, 0x0)
    test/e2e/framework/service/jig.go:773
  > k8s.io/kubernetes/test/e2e/network.glob..func19.4()
    test/e2e/network/loadbalancer.go:447
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000951380})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 00:12:07.281: INFO: Found 1/0 pods - will retry
Nov 26 00:12:09.325: INFO: Found 1/0 pods - will retry
Nov 26 00:12:11.367: INFO: Found 1/0 pods - will retry
Nov 26 00:12:13.413: INFO: Found 1/0 pods - will retry
Nov 26 00:12:15.462: INFO: Found 1/0 pods - will retry
Nov 26 00:12:17.463: INFO: Unexpected error:
<*errors.errorString | 0xc000ee65e0>: {
    s: "failed waiting for pods: timeout waiting for 0 pods to be created",
}
Nov 26 00:12:17.463: FAIL: failed waiting for pods: timeout waiting for 0 pods to be created

Full Stack Trace
k8s.io/kubernetes/test/e2e/network.glob..func19.4()
  test/e2e/network/loadbalancer.go:448 +0x123d
[AfterEach] [sig-network] LoadBalancers
  test/e2e/framework/node/init/init.go:32
Nov 26 00:12:17.464: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[AfterEach] [sig-network] LoadBalancers
  test/e2e/network/loadbalancer.go:71
Nov 26 00:12:17.548: INFO: Output of kubectl describe svc:
Nov 26 00:12:17.548: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.120.117 --kubeconfig=/workspace/.kube/config --namespace=loadbalancers-6737 describe svc --namespace=loadbalancers-6737'
Nov 26 00:12:17.919: INFO: stderr: ""
Nov 26 00:12:17.919: INFO: stdout: "Name: mutability-test\nNamespace: loadbalancers-6737\nLabels: testid=mutability-test-4ce72b67-e82e-4050-9e49-600c0397f028\nAnnotations: <none>\nSelector: testid=mutability-test-4ce72b67-e82e-4050-9e49-600c0397f028\nType: LoadBalancer\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.218.183\nIPs: 10.0.218.183\nLoadBalancer Ingress: 34.168.110.123\nPort: <unset> 81/UDP\nTargetPort: 80/UDP\nNodePort: <unset> 32499/UDP\nEndpoints: 10.64.3.95:80\nSession Affinity: None\nExternal Traffic Policy: Cluster\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Type 7m39s service-controller NodePort -> LoadBalancer\n Normal EnsuredLoadBalancer 3m45s service-controller Ensured load balancer\n Normal EnsuringLoadBalancer 2m57s (x2 over 4m19s) service-controller Ensuring load balancer\n"
Nov 26 00:12:17.919: INFO:
Name:                     mutability-test
Namespace:                loadbalancers-6737
Labels:                   testid=mutability-test-4ce72b67-e82e-4050-9e49-600c0397f028
Annotations:              <none>
Selector:                 testid=mutability-test-4ce72b67-e82e-4050-9e49-600c0397f028
Type:                     LoadBalancer
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.0.218.183
IPs:                      10.0.218.183
LoadBalancer Ingress:     34.168.110.123
Port:                     <unset>  81/UDP
TargetPort:               80/UDP
NodePort:                 <unset>  32499/UDP
Endpoints:                10.64.3.95:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:
  Type    Reason                Age                    From                Message
  ----    ------                ----                   ----                -------
  Normal  Type                  7m39s                  service-controller  NodePort -> LoadBalancer
  Normal  EnsuredLoadBalancer   3m45s                  service-controller  Ensured load balancer
  Normal  EnsuringLoadBalancer  2m57s (x2 over 4m19s)  service-controller  Ensuring load balancer
[DeferCleanup (Each)] [sig-network] LoadBalancers
  test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-network] LoadBalancers
  dump namespaces | framework.go:196
STEP: dump namespace information after failure 11/26/22 00:12:17.919
STEP: Collecting events from namespace "loadbalancers-6737". 11/26/22 00:12:17.919
STEP: Found 11 events. 11/26/22 00:12:17.965
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:04:26 +0000 UTC - event for mutability-test: {replication-controller } SuccessfulCreate: Created pod: mutability-test-cj5nq
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:04:27 +0000 UTC - event for mutability-test-cj5nq: {default-scheduler } Scheduled: Successfully assigned loadbalancers-6737/mutability-test-cj5nq to bootstrap-e2e-minion-group-4434
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:04:27 +0000 UTC - event for mutability-test-cj5nq: {kubelet bootstrap-e2e-minion-group-4434} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:04:27 +0000 UTC - event for mutability-test-cj5nq: {kubelet bootstrap-e2e-minion-group-4434} Created: Created container netexec
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:04:27 +0000 UTC - event for mutability-test-cj5nq: {kubelet bootstrap-e2e-minion-group-4434} Started: Started container netexec
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:04:38 +0000 UTC - event for mutability-test: {service-controller } Type: NodePort -> LoadBalancer
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:07:58 +0000 UTC - event for mutability-test: {service-controller } EnsuringLoadBalancer: Ensuring load balancer
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:08:32 +0000 UTC - event for mutability-test: {service-controller } EnsuredLoadBalancer: Ensured load balancer
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:09:15 +0000 UTC - event for mutability-test-cj5nq: {kubelet bootstrap-e2e-minion-group-4434} Killing: Stopping container netexec
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:09:15 +0000 UTC - event for mutability-test-cj5nq: {kubelet bootstrap-e2e-minion-group-4434} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Nov 26 00:12:17.965: INFO: At 2022-11-26 00:09:16 +0000 UTC - event for mutability-test-cj5nq: {kubelet bootstrap-e2e-minion-group-4434} Unhealthy: Readiness probe failed: Get "http://10.64.3.61:80/hostName": dial tcp 10.64.3.61:80: i/o timeout (Client.Timeout exceeded while awaiting headers) Nov 26 00:12:18.010: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 00:12:18.010: INFO: mutability-test-cj5nq bootstrap-e2e-minion-group-4434 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:04:27 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:09:16 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:09:16 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:04:27 +0000 UTC }] Nov 26 00:12:18.010: INFO: Nov 26 00:12:18.118: INFO: Logging node info for node bootstrap-e2e-master Nov 26 00:12:18.161: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master fd5fd34c-05e8-4c7e-8cbe-bf91f0f95cea 7952 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 00:12:07 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.120.117,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:4d77311e15d4bb4a15d85de5a36cea94,SystemUUID:4d77311e-15d4-bb4a-15d8-5de5a36cea94,BootID:80daeaca-84b8-4927-98e9-a38242975836,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:124989749,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:57659704,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 00:12:18.161: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 00:12:18.213: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 00:12:18.295: INFO: metadata-proxy-v0.1-thx76 started at 2022-11-25 23:56:34 +0000 UTC (0+2 container statuses recorded) Nov 26 00:12:18.295: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:12:18.295: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:12:18.295: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.295: INFO: Container kube-controller-manager ready: false, restart count 5 Nov 26 00:12:18.295: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.295: INFO: 
Container etcd-container ready: true, restart count 2 Nov 26 00:12:18.295: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.295: INFO: Container etcd-container ready: true, restart count 2 Nov 26 00:12:18.295: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.295: INFO: Container konnectivity-server-container ready: true, restart count 1 Nov 26 00:12:18.295: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.295: INFO: Container kube-addon-manager ready: true, restart count 1 Nov 26 00:12:18.295: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.295: INFO: Container l7-lb-controller ready: false, restart count 5 Nov 26 00:12:18.295: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.295: INFO: Container kube-scheduler ready: true, restart count 5 Nov 26 00:12:18.295: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.295: INFO: Container kube-apiserver ready: true, restart count 2 Nov 26 00:12:18.529: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 00:12:18.529: INFO: Logging node info for node bootstrap-e2e-minion-group-4434 Nov 26 00:12:18.574: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-4434 1aba3539-104b-4667-ab07-196915781437 7933 0 2022-11-25 23:56:41 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-4434 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-4434 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1058":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2121":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-9066":"bootstrap-e2e-minion-group-4434","csi-hostpath-provisioning-8288":"bootstrap-e2e-minion-group-4434","csi-hostpath-provisioning-985":"bootstrap-e2e-minion-group-4434","csi-mock-csi-mock-volumes-2299":"bootstrap-e2e-minion-group-4434"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:08:33 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:11:46 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:12:02 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-4434,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:11:46 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:08:33 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:08:33 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:08:33 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:08:33 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.8.98,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:e4112b9ebf318dd47967311e73935166,SystemUUID:e4112b9e-bf31-8dd4-7967-311e73935166,BootID:519ea9fb-1f7c-420e-8cea-cf36b5a7caca,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},},Config:nil,},} Nov 26 00:12:18.574: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-4434 Nov 26 00:12:18.624: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-4434 Nov 26 00:12:18.748: INFO: pod-secrets-b416252b-41f0-47a8-a1f1-2904f5649ea7 started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:12:18.748: INFO: metadata-proxy-v0.1-kdtvq started at 2022-11-25 23:56:42 +0000 UTC (0+2 container statuses recorded) Nov 26 00:12:18.748: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:12:18.748: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:12:18.748: INFO: pod-570aaca2-5565-4c62-89d3-a199c7b4ebbb started at 2022-11-25 23:58:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:12:18.748: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:02:52 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:18.748: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container hostpath ready: true, restart 
count 1 Nov 26 00:12:18.748: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 00:12:18.748: INFO: hostexec-bootstrap-e2e-minion-group-4434-9kcrr started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container agnhost-container ready: false, restart count 5 Nov 26 00:12:18.748: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:29 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:18.748: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container hostpath ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 00:12:18.748: INFO: netserver-0 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container webserver ready: true, restart count 4 Nov 26 00:12:18.748: INFO: pod-e24536f7-0c3d-44a2-ab47-cf68d9a28e12 started at 2022-11-26 00:04:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:12:18.748: INFO: nfs-server started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container nfs-server ready: false, restart count 0 Nov 26 00:12:18.748: INFO: pod-secrets-a50040d5-9c04-4844-ad21-907877e01b2f started at 2022-11-26 00:08:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:12:18.748: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:18.748: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container hostpath ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 00:12:18.748: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 00:12:18.748: INFO: csi-mockplugin-0 started at 2022-11-25 23:59:02 +0000 UTC (0+3 container statuses recorded) Nov 26 00:12:18.748: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:12:18.748: INFO: Container driver-registrar ready: true, restart count 3 Nov 26 00:12:18.748: INFO: Container mock ready: true, restart count 3 Nov 26 00:12:18.748: INFO: ss-1 started at 2022-11-26 00:01:13 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container webserver ready: true, restart count 5 Nov 26 00:12:18.748: INFO: addon-reconcile-test-qsrst started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container addon-reconcile-test ready: true, restart count 0 Nov 26 00:12:18.748: INFO: kube-proxy-bootstrap-e2e-minion-group-4434 started at 2022-11-25 23:56:41 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: 
Container kube-proxy ready: false, restart count 6 Nov 26 00:12:18.748: INFO: netserver-0 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container webserver ready: false, restart count 6 Nov 26 00:12:18.748: INFO: pod-93ad783f-bd8c-43cd-b936-dc278433c338 started at 2022-11-26 00:04:42 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:12:18.748: INFO: test-hostpath-type-f7tpn started at 2022-11-26 00:09:20 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:12:18.748: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:14 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:18.748: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 00:12:18.748: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 00:12:18.748: INFO: Container csi-resizer ready: true, restart count 4 Nov 26 00:12:18.748: INFO: Container csi-snapshotter ready: true, restart count 4 Nov 26 00:12:18.748: INFO: Container hostpath ready: true, restart count 4 Nov 26 00:12:18.748: INFO: Container liveness-probe ready: true, restart count 4 Nov 26 00:12:18.748: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 00:12:18.748: INFO: hostexec-bootstrap-e2e-minion-group-4434-4ctv8 started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:12:18.748: INFO: pvc-tester-hjwtq started at 2022-11-26 00:04:35 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:12:18.748: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:30 +0000 UTC (0+4 container statuses recorded) Nov 26 00:12:18.748: INFO: Container busybox ready: false, restart count 4 Nov 26 00:12:18.748: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:12:18.748: INFO: Container driver-registrar ready: false, restart count 4 Nov 26 00:12:18.748: INFO: Container mock ready: false, restart count 4 Nov 26 00:12:18.748: INFO: hostexec-bootstrap-e2e-minion-group-4434-hc6kp started at 2022-11-26 00:08:02 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:12:18.748: INFO: affinity-lb-esipp-transition-9jtt7 started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container affinity-lb-esipp-transition ready: true, restart count 1 Nov 26 00:12:18.748: INFO: hostexec-bootstrap-e2e-minion-group-4434-x8nd2 started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:12:18.748: INFO: mutability-test-cj5nq started at 2022-11-26 00:04:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container netexec ready: true, restart count 1 Nov 26 00:12:18.748: INFO: failure-2 started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container failure-2 ready: true, restart count 2 Nov 26 00:12:18.748: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:14 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:18.748: INFO: Container csi-attacher ready: false, restart count 4 Nov 26 00:12:18.748: INFO: Container 
csi-provisioner ready: false, restart count 4 Nov 26 00:12:18.748: INFO: Container csi-resizer ready: false, restart count 4 Nov 26 00:12:18.748: INFO: Container csi-snapshotter ready: false, restart count 4 Nov 26 00:12:18.748: INFO: Container hostpath ready: false, restart count 4 Nov 26 00:12:18.748: INFO: Container liveness-probe ready: false, restart count 4 Nov 26 00:12:18.748: INFO: Container node-driver-registrar ready: false, restart count 4 Nov 26 00:12:18.748: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:59:16 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:18.748: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 00:12:18.748: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:12:18.748: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 00:12:18.748: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 00:12:18.748: INFO: Container hostpath ready: false, restart count 5 Nov 26 00:12:18.748: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 00:12:18.748: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 00:12:18.748: INFO: hostexec-bootstrap-e2e-minion-group-4434-2cwpc started at 2022-11-26 00:09:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:12:18.748: INFO: hostexec-bootstrap-e2e-minion-group-4434-x9kjc started at 2022-11-26 00:09:36 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:12:18.748: INFO: pod-1b8bd600-f0ed-41f8-80b4-a6b12aef2c5d started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:12:18.748: INFO: konnectivity-agent-9h6nk started at 2022-11-25 23:56:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:18.748: INFO: Container konnectivity-agent ready: false, restart count 6 Nov 26 00:12:18.748: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:29 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:18.748: INFO: Container csi-attacher ready: true, restart count 6 Nov 26 00:12:18.749: INFO: Container csi-provisioner ready: true, restart count 6 Nov 26 00:12:18.749: INFO: Container csi-resizer ready: true, restart count 6 Nov 26 00:12:18.749: INFO: Container csi-snapshotter ready: true, restart count 6 Nov 26 00:12:18.749: INFO: Container hostpath ready: true, restart count 6 Nov 26 00:12:18.749: INFO: Container liveness-probe ready: true, restart count 6 Nov 26 00:12:18.749: INFO: Container node-driver-registrar ready: true, restart count 6 Nov 26 00:12:19.095: INFO: Latency metrics for node bootstrap-e2e-minion-group-4434 Nov 26 00:12:19.095: INFO: Logging node info for node bootstrap-e2e-minion-group-51gr Nov 26 00:12:19.138: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-51gr 739f23c9-858a-495c-bf21-9f7320b53ec4 7968 0 2022-11-25 23:56:31 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-51gr kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-51gr 
topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-2541":"bootstrap-e2e-minion-group-51gr"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:31 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 00:09:44 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:11:37 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:12:15 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-51gr,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: 
{{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:11:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:39 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:12:07 +0000 UTC,LastTransitionTime:2022-11-25 23:56:33 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.82.95.192,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:c1fa84483022b650834cff54e6b41aff,SystemUUID:c1fa8448-3022-b650-834c-ff54e6b41aff,BootID:3164b9a2-246e-435e-be83-42c92c567f8b,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-2541^f0f7e720-6d1d-11ed-827c-ce22e910fefa],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-2541^f0f7e720-6d1d-11ed-827c-ce22e910fefa,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-2541^665b16fc-6d1e-11ed-827c-ce22e910fefa,DevicePath:,},},Config:nil,},} Nov 26 00:12:19.139: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-51gr Nov 26 00:12:19.184: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-51gr Nov 26 00:12:19.247: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded) Nov 26 00:12:19.247: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:12:19.247: INFO: Container driver-registrar ready: true, restart count 1 Nov 26 00:12:19.247: INFO: Container mock ready: true, restart count 1 Nov 26 00:12:19.247: INFO: csi-mockplugin-attacher-0 started at 2022-11-25 23:58:36 +0000 UTC (0+1 container statuses 
recorded) Nov 26 00:12:19.247: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:12:19.247: INFO: pvc-volume-tester-d2gcf started at 2022-11-26 00:06:59 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container volume-tester ready: true, restart count 0 Nov 26 00:12:19.247: INFO: csi-mockplugin-0 started at 2022-11-25 23:58:36 +0000 UTC (0+3 container statuses recorded) Nov 26 00:12:19.247: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:12:19.247: INFO: Container driver-registrar ready: false, restart count 6 Nov 26 00:12:19.247: INFO: Container mock ready: false, restart count 6 Nov 26 00:12:19.247: INFO: hostexec-bootstrap-e2e-minion-group-51gr-gncwt started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:12:19.247: INFO: affinity-lb-esipp-transition-gjpq2 started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container affinity-lb-esipp-transition ready: true, restart count 1 Nov 26 00:12:19.247: INFO: kube-proxy-bootstrap-e2e-minion-group-51gr started at 2022-11-25 23:56:31 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container kube-proxy ready: true, restart count 6 Nov 26 00:12:19.247: INFO: coredns-6d97d5ddb-6vx5m started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container coredns ready: false, restart count 6 Nov 26 00:12:19.247: INFO: konnectivity-agent-sg59x started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container konnectivity-agent ready: true, restart count 6 Nov 26 00:12:19.247: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:00 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:19.247: INFO: Container csi-attacher ready: false, restart count 4 Nov 26 00:12:19.247: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 00:12:19.247: INFO: Container csi-resizer ready: false, restart count 4 Nov 26 00:12:19.247: INFO: Container csi-snapshotter ready: false, restart count 4 Nov 26 00:12:19.247: INFO: Container hostpath ready: false, restart count 4 Nov 26 00:12:19.247: INFO: Container liveness-probe ready: false, restart count 4 Nov 26 00:12:19.247: INFO: Container node-driver-registrar ready: false, restart count 4 Nov 26 00:12:19.247: INFO: metadata-proxy-v0.1-9xnlr started at 2022-11-25 23:56:32 +0000 UTC (0+2 container statuses recorded) Nov 26 00:12:19.247: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:12:19.247: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:12:19.247: INFO: volume-snapshot-controller-0 started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container volume-snapshot-controller ready: true, restart count 5 Nov 26 00:12:19.247: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:29 +0000 UTC (0+4 container statuses recorded) Nov 26 00:12:19.247: INFO: Container busybox ready: true, restart count 5 Nov 26 00:12:19.247: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 00:12:19.247: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 00:12:19.247: INFO: Container mock ready: false, restart count 5 Nov 26 00:12:19.247: INFO: ss-2 started at 2022-11-26 00:01:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container 
webserver ready: false, restart count 6 Nov 26 00:12:19.247: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:12:19.247: INFO: pvc-volume-tester-qbnwf started at 2022-11-26 00:04:41 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:12:19.247: INFO: l7-default-backend-8549d69d99-97xrr started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 00:12:19.247: INFO: netserver-1 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container webserver ready: true, restart count 7 Nov 26 00:12:19.247: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:15 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:19.247: INFO: Container csi-attacher ready: false, restart count 7 Nov 26 00:12:19.247: INFO: Container csi-provisioner ready: false, restart count 7 Nov 26 00:12:19.247: INFO: Container csi-resizer ready: false, restart count 7 Nov 26 00:12:19.247: INFO: Container csi-snapshotter ready: false, restart count 7 Nov 26 00:12:19.247: INFO: Container hostpath ready: false, restart count 7 Nov 26 00:12:19.247: INFO: Container liveness-probe ready: false, restart count 7 Nov 26 00:12:19.247: INFO: Container node-driver-registrar ready: false, restart count 7 Nov 26 00:12:19.247: INFO: hostexec-bootstrap-e2e-minion-group-51gr-jkx6v started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:12:19.247: INFO: coredns-6d97d5ddb-7cmct started at 2022-11-25 23:56:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container coredns ready: false, restart count 7 Nov 26 00:12:19.247: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:00:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:19.247: INFO: Container csi-attacher ready: false, restart count 2 Nov 26 00:12:19.247: INFO: Container csi-provisioner ready: false, restart count 2 Nov 26 00:12:19.247: INFO: Container csi-resizer ready: false, restart count 2 Nov 26 00:12:19.247: INFO: Container csi-snapshotter ready: false, restart count 2 Nov 26 00:12:19.247: INFO: Container hostpath ready: false, restart count 2 Nov 26 00:12:19.247: INFO: Container liveness-probe ready: false, restart count 2 Nov 26 00:12:19.247: INFO: Container node-driver-registrar ready: false, restart count 2 Nov 26 00:12:19.247: INFO: pvc-volume-tester-d9qcf started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:12:19.247: INFO: pod-subpath-test-preprovisionedpv-zqzf started at 2022-11-26 00:04:41 +0000 UTC (1+2 container statuses recorded) Nov 26 00:12:19.247: INFO: Init container init-volume-preprovisionedpv-zqzf ready: true, restart count 4 Nov 26 00:12:19.247: INFO: Container test-container-subpath-preprovisionedpv-zqzf ready: false, restart count 5 Nov 26 00:12:19.247: INFO: Container test-container-volume-preprovisionedpv-zqzf ready: false, restart count 5 Nov 26 00:12:19.247: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:19.247: INFO: Container csi-attacher ready: 
false, restart count 6 Nov 26 00:12:19.247: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:12:19.247: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 00:12:19.247: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 00:12:19.247: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:12:19.247: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 00:12:19.247: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:12:19.247: INFO: kube-dns-autoscaler-5f6455f985-7kdrd started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container autoscaler ready: false, restart count 6 Nov 26 00:12:19.247: INFO: netserver-1 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.247: INFO: Container webserver ready: false, restart count 4 Nov 26 00:12:19.480: INFO: Latency metrics for node bootstrap-e2e-minion-group-51gr Nov 26 00:12:19.480: INFO: Logging node info for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:12:19.523: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-pjt7 5f55dd6b-a4d8-42f3-9e85-83e83c8dc9de 7871 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-pjt7 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-pjt7 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-6349":"bootstrap-e2e-minion-group-pjt7","csi-mock-csi-mock-volumes-8318":"csi-mock-csi-mock-volumes-8318","csi-mock-csi-mock-volumes-8391":"bootstrap-e2e-minion-group-pjt7"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:07:59 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:11:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:11:45 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-pjt7,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning 
properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:11:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:10:35 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:10:35 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:10:35 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:10:35 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.105.124.11,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:f7ade85e43e2873500c8f33f09edf4a9,SystemUUID:f7ade85e-43e2-8735-00c8-f33f09edf4a9,BootID:07ab1c04-9bf6-4a67-bfa8-8d3160253b07,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 
registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9 kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6,DevicePath:,},},Config:nil,},} Nov 26 00:12:19.523: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:12:19.568: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-pjt7 Nov 26 00:12:19.656: INFO: ss-0 started at 2022-11-25 23:59:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container webserver ready: false, restart count 6 Nov 26 00:12:19.656: INFO: external-local-nodeport-dhpjs started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container netexec ready: false, restart count 1 Nov 26 00:12:19.656: INFO: pod-cc7edce3-35cc-4f45-bad6-a784001395c6 started at 2022-11-26 00:00:17 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:12:19.656: INFO: konnectivity-agent-ft6wq started at 2022-11-25 23:56:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container konnectivity-agent ready: false, restart count 6 Nov 26 00:12:19.656: INFO: netserver-2 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container webserver ready: true, restart count 7 Nov 26 00:12:19.656: INFO: pod-configmaps-283e1a65-2a1e-4f8e-9383-eeee204154b1 started at 2022-11-25 23:58:30 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:12:19.656: INFO: addon-reconcile-test-ftkd6 started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container addon-reconcile-test ready: true, restart count 1 Nov 26 00:12:19.656: INFO: test-hostpath-type-gjrf5 started at 2022-11-26 00:09:39 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:12:19.656: INFO: nfs-server started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container nfs-server ready: false, restart count 3 Nov 26 00:12:19.656: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-wfjcd started at 2022-11-26 00:02:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:12:19.656: INFO: nfs-io-client started at 2022-11-26 00:04:25 +0000 UTC (1+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Init container nfs-io-init ready: true, restart count 0 Nov 26 00:12:19.656: INFO: Container nfs-io-client ready: false, restart count 0 Nov 26 00:12:19.656: INFO: test-hostpath-type-lx6tk started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:12:19.656: INFO: test-hostpath-type-245dt started at 2022-11-26 
00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:12:19.656: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded) Nov 26 00:12:19.656: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:12:19.656: INFO: Container driver-registrar ready: true, restart count 3 Nov 26 00:12:19.656: INFO: Container mock ready: true, restart count 3 Nov 26 00:12:19.656: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 00:12:19.656: INFO: csi-mockplugin-0 started at 2022-11-26 00:08:03 +0000 UTC (0+4 container statuses recorded) Nov 26 00:12:19.656: INFO: Container busybox ready: true, restart count 2 Nov 26 00:12:19.656: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 00:12:19.656: INFO: Container driver-registrar ready: true, restart count 2 Nov 26 00:12:19.656: INFO: Container mock ready: true, restart count 2 Nov 26 00:12:19.656: INFO: test-hostpath-type-24tqc started at 2022-11-26 00:09:42 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container host-path-testing ready: true, restart count 0 Nov 26 00:12:19.656: INFO: kube-proxy-bootstrap-e2e-minion-group-pjt7 started at 2022-11-25 23:56:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container kube-proxy ready: false, restart count 6 Nov 26 00:12:19.656: INFO: metadata-proxy-v0.1-9jgjn started at 2022-11-25 23:56:35 +0000 UTC (0+2 container statuses recorded) Nov 26 00:12:19.656: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:12:19.656: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:12:19.656: INFO: pod-subpath-test-preprovisionedpv-92c8 started at 2022-11-26 00:02:55 +0000 UTC (1+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Init container init-volume-preprovisionedpv-92c8 ready: true, restart count 0 Nov 26 00:12:19.656: INFO: Container test-container-subpath-preprovisionedpv-92c8 ready: false, restart count 0 Nov 26 00:12:19.656: INFO: external-provisioner-5cfqt started at 2022-11-26 00:09:22 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container nfs-provisioner ready: true, restart count 0 Nov 26 00:12:19.656: INFO: test-hostpath-type-x56nj started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container host-path-sh-testing ready: false, restart count 0 Nov 26 00:12:19.656: INFO: pod-secrets-890a9a5b-57be-471c-8757-4aad820ed6d0 started at 2022-11-25 23:58:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:12:19.656: INFO: var-expansion-5d5e62ea-c0e8-4fb3-be3d-1c786f246364 started at 2022-11-26 00:01:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container dapi-container ready: false, restart count 0 Nov 26 00:12:19.656: INFO: hostpath-injector started at 2022-11-25 23:58:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container hostpath-injector ready: false, restart count 0 Nov 26 00:12:19.656: INFO: affinity-lb-esipp-transition-wvltg started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container affinity-lb-esipp-transition ready: true, restart 
count 3 Nov 26 00:12:19.656: INFO: external-provisioner-p6q4d started at 2022-11-26 00:04:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container nfs-provisioner ready: false, restart count 4 Nov 26 00:12:19.656: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-9qlmb started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:12:19.656: INFO: test-hostpath-type-n5z6m started at 2022-11-26 00:04:48 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:12:19.656: INFO: pod-configmaps-39454904-1ea1-4326-806f-d840f1ec6aab started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:12:19.656: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:19.656: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 00:12:19.656: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:12:19.656: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 00:12:19.656: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 00:12:19.656: INFO: Container hostpath ready: false, restart count 5 Nov 26 00:12:19.656: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 00:12:19.656: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 00:12:19.656: INFO: test-hostpath-type-whtq5 started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:12:19.656: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:48 +0000 UTC (0+7 container statuses recorded) Nov 26 00:12:19.656: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:12:19.656: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:12:19.656: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 00:12:19.656: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 00:12:19.656: INFO: Container hostpath ready: true, restart count 3 Nov 26 00:12:19.656: INFO: Container liveness-probe ready: true, restart count 3 Nov 26 00:12:19.656: INFO: Container node-driver-registrar ready: true, restart count 3 Nov 26 00:12:19.656: INFO: netserver-2 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container webserver ready: true, restart count 1 Nov 26 00:12:19.656: INFO: test-hostpath-type-9nghx started at 2022-11-26 00:04:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:12:19.656: INFO: metrics-server-v0.5.2-867b8754b9-c8h52 started at 2022-11-25 23:57:03 +0000 UTC (0+2 container statuses recorded) Nov 26 00:12:19.656: INFO: Container metrics-server ready: false, restart count 6 Nov 26 00:12:19.656: INFO: Container metrics-server-nanny ready: false, restart count 7 Nov 26 00:12:19.656: INFO: pod-back-off-image started at 2022-11-26 00:00:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container back-off ready: false, restart count 7 Nov 26 00:12:19.656: INFO: pod-configmaps-607fb46f-a546-474e-99da-bccf05cace4e started at 2022-11-25 23:59:28 +0000 UTC (0+1 container statuses 
recorded) Nov 26 00:12:19.656: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:12:19.656: INFO: inclusterclient started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container inclusterclient ready: false, restart count 0 Nov 26 00:12:19.656: INFO: pod-subpath-test-inlinevolume-xjdn started at 2022-11-26 00:04:26 +0000 UTC (1+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Init container init-volume-inlinevolume-xjdn ready: true, restart count 0 Nov 26 00:12:19.656: INFO: Container test-container-subpath-inlinevolume-xjdn ready: false, restart count 0 Nov 26 00:12:19.656: INFO: pvc-volume-tester-wmxdq started at 2022-11-26 00:04:47 +0000 UTC (0+1 container statuses recorded) Nov 26 00:12:19.656: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:12:19.935: INFO: Latency metrics for node bootstrap-e2e-minion-group-pjt7 [DeferCleanup (Each)] [sig-network] LoadBalancers tear down framework | framework.go:193 STEP: Destroying namespace "loadbalancers-6737" for this suite. 11/26/22 00:12:19.935
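The per-node pod dumps above are produced by the framework's failure-dump helpers, which list every pod scheduled to a node and print each container's ready state and restart count. A minimal client-go sketch of the same listing, assuming the kubeconfig path from this run and using one of the node names above purely as an illustration:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the same kubeconfig the e2e run used.
	config, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// spec.nodeName is an indexed field selector, so the apiserver does the
	// per-node filtering; namespace "" lists across all namespaces.
	pods, err := client.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
		FieldSelector: "spec.nodeName=bootstrap-e2e-minion-group-pjt7",
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		for _, cs := range p.Status.ContainerStatuses {
			fmt.Printf("%s: Container %s ready: %v, restart count %d\n",
				p.Name, cs.Name, cs.Ready, cs.RestartCount)
		}
	}
}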
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\sbe\sable\sto\screate\sLoadBalancer\sService\swithout\sNodePort\sand\schange\sit\s\[Slow\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0012d84b0) test/e2e/framework/framework.go:241 +0x96f There were additional failures detected after the initial failure: [PANICKED] Test Panicked In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260 runtime error: invalid memory address or nil pointer dereference Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func19.2() test/e2e/network/loadbalancer.go:73 +0x113 (from junit_01.xml)
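The [PANICKED] entry is a follow-on failure: BeforeEach timed out before the suite's client state was initialized, so the AfterEach at loadbalancer.go:73 dereferenced a nil pointer. A hedged sketch of the failure mode and the usual guard, with illustrative names rather than the actual loadbalancer.go code:

package sketch

import (
	"github.com/onsi/ginkgo/v2"
	"k8s.io/client-go/kubernetes"
)

// cs is assigned in BeforeEach; it stays nil when setup fails early,
// e.g. when namespace creation times out as it did here.
var cs kubernetes.Interface

var _ = ginkgo.Describe("LoadBalancers (sketch)", func() {
	ginkgo.AfterEach(func() {
		if cs == nil {
			return // setup never completed; skip cleanup instead of panicking
		}
		// ... cleanup that dereferences cs ...
	})
})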
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:14:33.212 Nov 26 00:14:33.212: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/26/22 00:14:33.214 Nov 26 00:14:33.254: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:35.295: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:37.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:39.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:41.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:43.293: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:45.293: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:47.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:49.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:51.293: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:53.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:55.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:57.293: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:59.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:01.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:03.294: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:03.334: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:03.334: INFO: Unexpected error: <*errors.errorString | 0xc00017da30>: { s: "timed out waiting for the condition", } Nov 26 00:15:03.334: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0012d84b0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers test/e2e/framework/node/init/init.go:32 Nov 26 00:15:03.335: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:71 [DeferCleanup (Each)] [sig-network] LoadBalancers dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:15:03.375 [DeferCleanup (Each)] [sig-network] LoadBalancers tear down framework | framework.go:193
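The repeated "connection refused" lines above come from a poll loop: the framework retries namespace creation every ~2s and, once the overall timeout elapses, surfaces the wait package's generic "timed out waiting for the condition" error. A minimal sketch of such a loop, assuming the wait.PollImmediate helper that appears elsewhere in these stack traces (the timeout value is illustrative):

package sketch

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func createNamespaceWithRetry(client kubernetes.Interface, name string) (*corev1.Namespace, error) {
	var ns *corev1.Namespace
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		created, err := client.CoreV1().Namespaces().Create(context.TODO(),
			&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}},
			metav1.CreateOptions{})
		if err != nil {
			// Transient apiserver outages (like the dial errors above) are
			// logged and retried rather than failing the poll outright.
			fmt.Printf("Unexpected error while creating namespace: %v\n", err)
			return false, nil
		}
		ns = created
		return true, nil
	})
	// On timeout, err is wait.ErrWaitTimeout: "timed out waiting for the condition".
	return ns, err
}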
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\sbe\sable\sto\screate\san\sinternal\stype\sload\sbalancer\s\[Slow\]$'
test/e2e/network/loadbalancer.go:606 k8s.io/kubernetes/test/e2e/network.glob..func19.6() test/e2e/network/loadbalancer.go:606 +0x2df (from junit_01.xml)
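The failure at loadbalancer.go:606 is the jig's WaitForLoadBalancer call, which (per the goroutine dumps below) bottoms out in wait.PollImmediate: the Service is re-fetched until the cloud provider populates status.loadBalancer.ingress. On GCE the internal variant is requested via a cloud-specific annotation (historically cloud.google.com/load-balancer-type: "Internal"). A hedged sketch of the wait, with illustrative names rather than the jig's actual code:

package sketch

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForLoadBalancer(client kubernetes.Interface, ns, name string, timeout time.Duration) (*corev1.Service, error) {
	var svc *corev1.Service
	err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		s, err := client.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			// Matches the "Retrying ...." lines below: a refused connection
			// is treated as retryable, not fatal.
			return false, nil
		}
		if len(s.Status.LoadBalancer.Ingress) == 0 {
			return false, nil // cloud provider hasn't provisioned the LB yet
		}
		svc = s
		return true, nil
	})
	return svc, err
}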
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:13:31.834 Nov 26 00:13:31.834: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/26/22 00:13:31.836 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 00:13:32.072 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 00:13:32.204 [BeforeEach] [sig-network] LoadBalancers test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:65 [It] should be able to create an internal type load balancer [Slow] test/e2e/network/loadbalancer.go:571 STEP: creating pod to be part of service lb-internal 11/26/22 00:13:32.51 Nov 26 00:13:32.583: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 00:13:32.672: INFO: Found all 1 pods Nov 26 00:13:32.672: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [lb-internal-lvsqm] Nov 26 00:13:32.672: INFO: Waiting up to 2m0s for pod "lb-internal-lvsqm" in namespace "loadbalancers-95" to be "running and ready" Nov 26 00:13:32.746: INFO: Pod "lb-internal-lvsqm": Phase="Pending", Reason="", readiness=false. Elapsed: 73.722052ms Nov 26 00:13:32.746: INFO: Error evaluating pod condition running and ready: want pod 'lb-internal-lvsqm' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:13:34.831: INFO: Pod "lb-internal-lvsqm": Phase="Pending", Reason="", readiness=false. Elapsed: 2.15896825s Nov 26 00:13:34.831: INFO: Error evaluating pod condition running and ready: want pod 'lb-internal-lvsqm' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:13:36.825: INFO: Pod "lb-internal-lvsqm": Phase="Pending", Reason="", readiness=false. Elapsed: 4.152397898s Nov 26 00:13:36.825: INFO: Error evaluating pod condition running and ready: want pod 'lb-internal-lvsqm' on 'bootstrap-e2e-minion-group-pjt7' to be 'Running' but was 'Pending' Nov 26 00:13:38.963: INFO: Pod "lb-internal-lvsqm": Phase="Running", Reason="", readiness=true. Elapsed: 6.290465974s Nov 26 00:13:38.963: INFO: Pod "lb-internal-lvsqm" satisfied condition "running and ready" Nov 26 00:13:38.963: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [lb-internal-lvsqm] STEP: creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled 11/26/22 00:13:38.963 Nov 26 00:13:39.336: INFO: Waiting up to 15m0s for service "lb-internal" to have a LoadBalancer Nov 26 00:13:59.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:01.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:03.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:05.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:07.468: INFO: Retrying .... 
error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:09.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:11.469: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:13.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:15.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:17.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:19.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:21.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:23.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:25.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:27.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:29.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:31.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:33.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:35.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:37.468: INFO: Retrying .... 
error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:39.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:41.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:43.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:45.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:47.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:49.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:51.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:53.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:55.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:57.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:14:59.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:01.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:03.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:05.469: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:07.468: INFO: Retrying .... 
error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused
[... identical "Retrying ...." / connection-refused pairs repeat every 2s, Nov 26 00:15:09.468 through 00:15:57.468 ...]
------------------------------
Progress Report for Ginkgo Process #15
Automatically polling progress:
  [sig-network] LoadBalancers should be able to create an internal type load balancer [Slow] (Spec Runtime: 5m0.598s)
    test/e2e/network/loadbalancer.go:571
    In [It] (Node Runtime: 5m0s)
      test/e2e/network/loadbalancer.go:571
      At [By Step] creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled (Step Runtime: 4m53.469s)
        test/e2e/network/loadbalancer.go:593

Spec Goroutine
goroutine 1882 [select]
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029b3068, 0x2fdb16a?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x30?, 0x2fd9d05?, 0x20?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc001178780?, 0xc003913b80?, 0x262a967?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x7fa7740?, 0xc00017e680?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForCondition(0xc002fe11d0, 0x4?, {0x7600fe2, 0x14}, 0x7895b68)
    test/e2e/framework/service/jig.go:631
  k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancer(0xc002fe11d0, 0x0?)
    test/e2e/framework/service/jig.go:582
> k8s.io/kubernetes/test/e2e/network.glob..func19.6()
    test/e2e/network/loadbalancer.go:605
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc003c3a300})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
[... the report above is re-emitted every 20s with an identical goroutine 1882 stack (Spec Runtime 5m20.6s, 5m40.602s, 6m0.605s, 6m20.608s); only the runtime counters advance ...]
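The goroutine stack above is the whole story of the hang: the spec is parked in the service jig's poll loop, with every frame below WaitForLoadBalancer belonging to the generic wait machinery. Below is a minimal, hypothetical sketch of the wait.PollImmediate idiom that stack shows; getService is a made-up stand-in for the client-go GET, the interval and timeout values are illustrative, and none of this is the actual jig.go source:

    // Hedged sketch of the poll pattern in the stack above:
    // TestJig.WaitForLoadBalancer -> waitForCondition -> wait.PollImmediate.
    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // getService is a hypothetical stand-in for the client-go GET that is
    // failing with "connection refused" in this log.
    func getService(namespace, name string) error {
        return fmt.Errorf("connect: connection refused")
    }

    func main() {
        // Poll on a fixed interval (the log shows a ~2s cadence) until the
        // condition reports done or the timeout expires.
        err := wait.PollImmediate(2*time.Second, 20*time.Minute, func() (bool, error) {
            if err := getService("loadbalancers-95", "lb-internal"); err != nil {
                // Returning (false, nil) treats the error as transient:
                // log and retry rather than failing the spec immediately.
                fmt.Printf("Retrying .... error trying to get Service lb-internal: %v\n", err)
                return false, nil
            }
            return true, nil // fetched; the real jig then inspects the LB ingress status
        })
        if err != nil {
            fmt.Println("wait failed:", err)
        }
    }

Because the condition swallows the GET error and returns (false, nil), an apiserver outage surfaces only as this repeating "Retrying ...." line until the jig's timeout finally fails the step.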
[... the "Retrying ...." / connection-refused pairs resume at Nov 26 00:19:53.467 and repeat every 2s through 00:20:31.468; the Spec Runtime 6m40.61s progress report repeats the same goroutine 1882 stack ...]
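A note on the error text itself: "connect: connection refused" usually means the TCP connect to 34.168.120.117:443 was answered with a RST, i.e. the endpoint is reachable at the IP level but nothing is accepting on the apiserver port (a dial timeout or "no route to host" would instead point at the network path). The exact string can be reproduced with a bare dial; the address here is just the one from this log, and any closed port behaves the same:

    // Reproduces the log's error string with a bare TCP dial (illustrative only).
    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        conn, err := net.DialTimeout("tcp", "34.168.120.117:443", 5*time.Second)
        if err != nil {
            // When the peer rejects the connect (port closed / process down)
            // this prints:
            // dial tcp 34.168.120.117:443: connect: connection refused
            fmt.Println(err)
            return
        }
        defer conn.Close()
        fmt.Println("connected") // apiserver port is accepting again
    }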
[... the same Retrying/connection-refused pair continues every 2s from 00:20:33.468 through 00:22:03.468; identical progress reports fire at Spec Runtime 7m0.613s, 7m20.615s, 7m40.618s, 8m0.62s and 8m20.622s ...]
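For reference, the [By Step] name that every report repeats ("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled") describes a Service along the lines of the sketch below. This is an assumption-laden illustration: the annotation key shown is GCE's legacy internal-LB key and the selector and ports are invented; the test's actual construction lives at test/e2e/network/loadbalancer.go:593.

    // Hedged illustration of the kind of Service the step creates: type
    // LoadBalancer plus a cloud-specific internal-LB annotation. Annotation
    // key, selector and ports are assumptions, not the test's actual values.
    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        svc := &corev1.Service{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "lb-internal",
                Namespace: "loadbalancers-95",
                Annotations: map[string]string{
                    // GCE's legacy internal-LB annotation; other clouds use
                    // provider-specific keys.
                    "cloud.google.com/load-balancer-type": "Internal",
                },
            },
            Spec: corev1.ServiceSpec{
                Type:     corev1.ServiceTypeLoadBalancer,
                Selector: map[string]string{"app": "lb-backend"}, // hypothetical
                Ports: []corev1.ServicePort{{
                    Port:       80,
                    TargetPort: intstr.FromInt(8080),
                }},
            },
        }
        // WaitForLoadBalancer (the blocked frame in the reports) polls this
        // object until status.loadBalancer.ingress is populated by the cloud
        // provider, which cannot happen while the apiserver is unreachable.
        fmt.Printf("%s/%s type=%s\n", svc.Namespace, svc.Name, svc.Spec.Type)
    }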
[... the pattern continues unchanged: Retrying/connection-refused every 2s from 00:22:05.468 through 00:24:11.468, with identical progress reports at Spec Runtime 8m40.625s, 9m0.627s, 9m20.629s, 9m40.631s, 10m0.633s and 10m20.636s ...]
------------------------------
Progress Report for Ginkgo Process #15
Automatically polling progress:
  [sig-network] LoadBalancers should be able to create an internal type load balancer [Slow] (Spec Runtime: 10m40.638s)
    test/e2e/network/loadbalancer.go:571
    In [It] (Node Runtime: 10m40.04s)
      test/e2e/network/loadbalancer.go:571
      At [By Step] creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled (Step Runtime: 10m33.509s)
        test/e2e/network/loadbalancer.go:593

Spec Goroutine
goroutine 1882 [select]
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8},
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x30?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc001178780?, 0xc003913b80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x7fa7740?, 0xc00017e680?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForCondition(0xc002fe11d0, 0x4?, {0x7600fe2, 0x14}, 0x7895b68) test/e2e/framework/service/jig.go:631 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancer(0xc002fe11d0, 0x0?) test/e2e/framework/service/jig.go:582 > k8s.io/kubernetes/test/e2e/network.glob..func19.6() test/e2e/network/loadbalancer.go:605 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc003c3a300}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 00:24:13.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:15.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:17.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:19.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:21.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:23.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:25.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:27.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:29.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:31.467: INFO: Retrying .... 
error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused ------------------------------ Progress Report for Ginkgo Process #15 Automatically polling progress: [sig-network] LoadBalancers should be able to create an internal type load balancer [Slow] (Spec Runtime: 11m0.641s) test/e2e/network/loadbalancer.go:571 In [It] (Node Runtime: 11m0.043s) test/e2e/network/loadbalancer.go:571 At [By Step] creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled (Step Runtime: 10m53.512s) test/e2e/network/loadbalancer.go:593 Spec Goroutine goroutine 1882 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029b3068, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x30?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc001178780?, 0xc003913b80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x7fa7740?, 0xc00017e680?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForCondition(0xc002fe11d0, 0x4?, {0x7600fe2, 0x14}, 0x7895b68) test/e2e/framework/service/jig.go:631 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancer(0xc002fe11d0, 0x0?) test/e2e/framework/service/jig.go:582 > k8s.io/kubernetes/test/e2e/network.glob..func19.6() test/e2e/network/loadbalancer.go:605 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc003c3a300}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 00:24:33.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:35.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:37.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:39.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:41.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:43.468: INFO: Retrying .... 
error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:45.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:47.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:49.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:51.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused ------------------------------ Progress Report for Ginkgo Process #15 Automatically polling progress: [sig-network] LoadBalancers should be able to create an internal type load balancer [Slow] (Spec Runtime: 11m20.643s) test/e2e/network/loadbalancer.go:571 In [It] (Node Runtime: 11m20.045s) test/e2e/network/loadbalancer.go:571 At [By Step] creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled (Step Runtime: 11m13.514s) test/e2e/network/loadbalancer.go:593 Spec Goroutine goroutine 1882 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029b3068, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x30?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc001178780?, 0xc003913b80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x7fa7740?, 0xc00017e680?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForCondition(0xc002fe11d0, 0x4?, {0x7600fe2, 0x14}, 0x7895b68) test/e2e/framework/service/jig.go:631 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancer(0xc002fe11d0, 0x0?) test/e2e/framework/service/jig.go:582 > k8s.io/kubernetes/test/e2e/network.glob..func19.6() test/e2e/network/loadbalancer.go:605 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc003c3a300}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 00:24:53.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:55.468: INFO: Retrying .... 
error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:57.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:24:59.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:01.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:03.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:05.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:07.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:09.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:11.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused ------------------------------ Progress Report for Ginkgo Process #15 Automatically polling progress: [sig-network] LoadBalancers should be able to create an internal type load balancer [Slow] (Spec Runtime: 11m40.648s) test/e2e/network/loadbalancer.go:571 In [It] (Node Runtime: 11m40.051s) test/e2e/network/loadbalancer.go:571 At [By Step] creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled (Step Runtime: 11m33.52s) test/e2e/network/loadbalancer.go:593 Spec Goroutine goroutine 1882 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029b3068, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x30?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc001178780?, 0xc003913b80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x7fa7740?, 0xc00017e680?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForCondition(0xc002fe11d0, 0x4?, {0x7600fe2, 0x14}, 0x7895b68) test/e2e/framework/service/jig.go:631 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancer(0xc002fe11d0, 0x0?) 
test/e2e/framework/service/jig.go:582 > k8s.io/kubernetes/test/e2e/network.glob..func19.6() test/e2e/network/loadbalancer.go:605 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc003c3a300}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 00:25:13.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:15.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:17.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:19.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:21.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:23.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:25.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:27.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:29.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:25:31.467: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused ------------------------------ Progress Report for Ginkgo Process #15 Automatically polling progress: [sig-network] LoadBalancers should be able to create an internal type load balancer [Slow] (Spec Runtime: 12m0.65s) test/e2e/network/loadbalancer.go:571 In [It] (Node Runtime: 12m0.052s) test/e2e/network/loadbalancer.go:571 At [By Step] creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled (Step Runtime: 11m53.522s) test/e2e/network/loadbalancer.go:593 Spec Goroutine goroutine 1882 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029b3068, 0x2fdb16a?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x30?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc001178780?, 0xc003913b80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x7fa7740?, 0xc00017e680?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForCondition(0xc002fe11d0, 0x4?, {0x7600fe2, 0x14}, 0x7895b68) test/e2e/framework/service/jig.go:631 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancer(0xc002fe11d0, 0x0?) test/e2e/framework/service/jig.go:582 > k8s.io/kubernetes/test/e2e/network.glob..func19.6() test/e2e/network/loadbalancer.go:605 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc003c3a300}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 00:25:33.468: INFO: Retrying .... error trying to get Service lb-internal: Get "https://34.168.120.117/api/v1/namespaces/loadbalancers-95/services/lb-internal": dial tcp 34.168.120.117:443: connect: connection refused ------------------------------ Progress Report for Ginkgo Process #15 Automatically polling progress: [sig-network] LoadBalancers should be able to create an internal type load balancer [Slow] (Spec Runtime: 12m20.653s) test/e2e/network/loadbalancer.go:571 In [It] (Node Runtime: 12m20.055s) test/e2e/network/loadbalancer.go:571 At [By Step] creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled (Step Runtime: 12m13.524s) test/e2e/network/loadbalancer.go:593 Spec Goroutine goroutine 1882 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029b3068, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x30?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc001178780?, 0xc003913b80?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0x7fa7740?, 0xc00017e680?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).waitForCondition(0xc002fe11d0, 0x4?, {0x7600fe2, 0x14}, 0x7895b68) test/e2e/framework/service/jig.go:631 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancer(0xc002fe11d0, 0x0?) 
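The goroutine stack shows the mechanism behind all of these retries: TestJig.WaitForLoadBalancer drives wait.PollImmediate, re-fetching the Service until its load-balancer status is populated or the timeout expires. A minimal sketch of that pattern follows; the 2s interval, the helper name, and the error handling are illustrative assumptions, not the framework's exact code.

// Sketch of the polling pattern in the stack above (assumptions noted in
// the lead-in; this is not the e2e framework's actual implementation).
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForLoadBalancer(cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		svc, err := cs.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			// Returning (false, nil) swallows transient apiserver errors, so a
			// connection-refused error is logged and retried on the next tick
			// instead of failing the spec immediately.
			fmt.Printf("Retrying .... error trying to get Service %s: %v\n", name, err)
			return false, nil
		}
		// Done once the cloud provider has assigned an ingress IP or hostname.
		return len(svc.Status.LoadBalancer.Ingress) > 0, nil
	})
}

With this shape, an unreachable apiserver never fails the poll early; it simply consumes the entire timeout, which matches the spec runtime climbing toward 15 minutes below.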
[The progress report continues to repeat with an identical stack at Spec Runtime 12m40.655s, 13m0.657s, 13m20.659s, 13m40.662s, 14m0.665s, 14m20.668s, 14m40.67s and 15m0.672s; no further retry lines are logged after 00:25:33.]
Nov 26 00:28:39.511: INFO: Unexpected error:
<*fmt.wrapError | 0xc001485d40>: {
    msg: "timed out waiting for service \"lb-internal\" to have a load balancer: timed out waiting for the condition",
    err: <*errors.errorString | 0xc00017da10>{
        s: "timed out waiting for the condition",
    },
}
Nov 26 00:28:39.511: FAIL: timed out waiting for service "lb-internal" to have a load balancer: timed out waiting for the condition

Full Stack Trace
k8s.io/kubernetes/test/e2e/network.glob..func19.6()
	test/e2e/network/loadbalancer.go:606 +0x2df
STEP: Clean up loadbalancer service 11/26/22 00:28:39.512
[AfterEach] [sig-network] LoadBalancers
  test/e2e/framework/node/init/init.go:32
Nov 26 00:28:39.512: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[AfterEach] [sig-network] LoadBalancers
  test/e2e/network/loadbalancer.go:71
Nov 26 00:28:39.597: INFO: Output of kubectl describe svc:
Nov 26 00:28:39.597: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.120.117 --kubeconfig=/workspace/.kube/config --namespace=loadbalancers-95 describe svc --namespace=loadbalancers-95'
Nov 26 00:28:39.927: INFO: stderr: ""
Nov 26 00:28:39.927: INFO: stdout:
Name:                     lb-internal
Namespace:                loadbalancers-95
Labels:                   testid=lb-internal-963f6ddc-8970-4c0b-af40-675f349a8e85
Annotations:              networking.gke.io/load-balancer-type: Internal
Selector:                 testid=lb-internal-963f6ddc-8970-4c0b-af40-675f349a8e85
Type:                     LoadBalancer
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.0.10.188
IPs:                      10.0.10.188
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  30450/TCP
Endpoints:                10.64.1.164:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:
  Type    Reason                Age    From                Message
  ----    ------                ----   ----                -------
  Normal  EnsuringLoadBalancer  8m49s  service-controller  Ensuring load balancer
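Every failed attempt is the same dial error against 34.168.120.117:443, the external IP of bootstrap-e2e-master (see the node dump below), so the apiserver itself was refusing connections for roughly the last ten minutes of the poll. A quick way to separate "control plane down" from "load balancer never provisioned" is to probe the apiserver directly with the run's kubeconfig; this is a hypothetical standalone check, not part of the test suite.

// Hypothetical standalone probe (not part of the e2e suite): hit the
// apiserver's /readyz endpoint using the same kubeconfig the run uses.
// A connection-refused error here confirms the control plane, not the
// Service under test, is the failing component.
package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	body, err := cs.Discovery().RESTClient().Get().AbsPath("/readyz").DoRaw(context.TODO())
	fmt.Printf("readyz: %q err: %v\n", string(body), err)
}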
[DeferCleanup (Each)] [sig-network] LoadBalancers
  test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-network] LoadBalancers
  dump namespaces | framework.go:196
STEP: dump namespace information after failure 11/26/22 00:28:39.927
STEP: Collecting events from namespace "loadbalancers-95". 11/26/22 00:28:39.927
STEP: Found 9 events. 11/26/22 00:28:39.969
Nov 26 00:28:39.969: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for lb-internal-lvsqm: { } Scheduled: Successfully assigned loadbalancers-95/lb-internal-lvsqm to bootstrap-e2e-minion-group-pjt7
Nov 26 00:28:39.969: INFO: At 2022-11-26 00:13:32 +0000 UTC - event for lb-internal: {replication-controller } SuccessfulCreate: Created pod: lb-internal-lvsqm
Nov 26 00:28:39.969: INFO: At 2022-11-26 00:13:34 +0000 UTC - event for lb-internal-lvsqm: {kubelet bootstrap-e2e-minion-group-pjt7} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Nov 26 00:28:39.969: INFO: At 2022-11-26 00:13:34 +0000 UTC - event for lb-internal-lvsqm: {kubelet bootstrap-e2e-minion-group-pjt7} Created: Created container netexec
Nov 26 00:28:39.969: INFO: At 2022-11-26 00:13:35 +0000 UTC - event for lb-internal-lvsqm: {kubelet bootstrap-e2e-minion-group-pjt7} Started: Started container netexec
Nov 26 00:28:39.969: INFO: At 2022-11-26 00:19:50 +0000 UTC - event for lb-internal: {service-controller } EnsuringLoadBalancer: Ensuring load balancer
Nov 26 00:28:39.969: INFO: At 2022-11-26 00:22:23 +0000 UTC - event for lb-internal-lvsqm: {kubelet bootstrap-e2e-minion-group-pjt7} Killing: Stopping container netexec
Nov 26 00:28:39.969: INFO: At 2022-11-26 00:22:24 +0000 UTC - event for lb-internal-lvsqm: {kubelet bootstrap-e2e-minion-group-pjt7} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Nov 26 00:28:39.969: INFO: At 2022-11-26 00:26:02 +0000 UTC - event for lb-internal-lvsqm: {kubelet bootstrap-e2e-minion-group-pjt7} BackOff: Back-off restarting failed container netexec in pod lb-internal-lvsqm_loadbalancers-95(c176abb1-290d-45d5-a35a-b7d2080c8db5)
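The event dump above is the framework's generic namespace post-mortem. A rough client-go equivalent of the "Collecting events" step is sketched below, under the assumption that plain core/v1 Events suffice; the framework's own dump helper may sort and format differently.

// Sketch of listing core/v1 Events in the test namespace and printing
// them in the same "At <time> - event for <object>" shape the log uses.
// The kubeconfig path and namespace are taken from the run above.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	events, err := cs.CoreV1().Events("loadbalancers-95").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, e := range events.Items {
		fmt.Printf("At %v - event for %s: {%s} %s: %s\n",
			e.FirstTimestamp, e.InvolvedObject.Name, e.Source.Component, e.Reason, e.Message)
	}
}

Run against this namespace, it would show the same stall visible above: EnsuringLoadBalancer fires at 00:19:50 and no EnsuredLoadBalancer follow-up appears before the apiserver stops answering.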
Nov 26 00:28:40.012: INFO: POD NODE PHASE GRACE CONDITIONS
Nov 26 00:28:40.012: INFO: lb-internal-lvsqm bootstrap-e2e-minion-group-pjt7 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:13:32 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:26:19 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:26:19 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:13:32 +0000 UTC }]
Nov 26 00:28:40.012: INFO:
Nov 26 00:28:40.057: INFO: Unable to fetch loadbalancers-95/lb-internal-lvsqm/netexec logs: an error on the server ("unknown") has prevented the request from succeeding (get pods lb-internal-lvsqm)
Nov 26 00:28:40.106: INFO: Logging node info for node bootstrap-e2e-master
Nov 26 00:28:40.148: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master fd5fd34c-05e8-4c7e-8cbe-bf91f0f95cea 9924 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 00:25:43 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}}
status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.120.117,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:4d77311e15d4bb4a15d85de5a36cea94,SystemUUID:4d77311e-15d4-bb4a-15d8-5de5a36cea94,BootID:80daeaca-84b8-4927-98e9-a38242975836,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:124989749,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:57659704,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 00:28:40.149: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 00:28:40.194: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 00:28:40.238: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-master: error trying to reach service: No agent available Nov 26 00:28:40.238: INFO: Logging node info for node bootstrap-e2e-minion-group-4434 Nov 26 00:28:40.281: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-4434 1aba3539-104b-4667-ab07-196915781437 10037 0 2022-11-25 23:56:41 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 
kubernetes.io/hostname:bootstrap-e2e-minion-group-4434 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-4434 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1058":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2486":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-9066":"bootstrap-e2e-minion-group-4434"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:08:33 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:25:40 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:26:26 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-4434,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:25:40 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:25:40 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:25:40 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:25:40 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:25:40 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:25:40 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:25:40 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:25:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.8.98,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:e4112b9ebf318dd47967311e73935166,SystemUUID:e4112b9e-bf31-8dd4-7967-311e73935166,BootID:519ea9fb-1f7c-420e-8cea-cf36b5a7caca,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},},Config:nil,},} Nov 26 00:28:40.281: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-4434 Nov 26 00:28:40.327: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-4434 Nov 26 00:28:40.370: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-4434: error trying to reach service: No agent available Nov 26 00:28:40.370: INFO: Logging node info for node bootstrap-e2e-minion-group-51gr Nov 26 00:28:40.412: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-51gr 739f23c9-858a-495c-bf21-9f7320b53ec4 10270 0 2022-11-25 23:56:31 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-51gr kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-51gr topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] 
map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-5859":"bootstrap-e2e-minion-group-51gr","csi-hostpath-multivolume-6045":"bootstrap-e2e-minion-group-51gr","csi-hostpath-multivolume-855":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumeio-1998":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumemode-9682":"bootstrap-e2e-minion-group-51gr","csi-mock-csi-mock-volumes-2541":"bootstrap-e2e-minion-group-51gr"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:31 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 00:13:55 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 00:25:41 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:28:23 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-51gr,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:25:41 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:25:41 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:25:41 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:25:41 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:25:41 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:25:41 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:25:41 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:39 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:33 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.82.95.192,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:c1fa84483022b650834cff54e6b41aff,SystemUUID:c1fa8448-3022-b650-834c-ff54e6b41aff,BootID:3164b9a2-246e-435e-be83-42c92c567f8b,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 00:28:40.413: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-51gr Nov 26 00:28:40.458: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-51gr Nov 26 00:28:40.502: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-51gr: error trying to reach service: No agent available Nov 26 00:28:40.502: INFO: Logging node info for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:28:40.545: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-pjt7 5f55dd6b-a4d8-42f3-9e85-83e83c8dc9de 9926 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-pjt7 kubernetes.io/os:linux 
node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-pjt7 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-6349":"bootstrap-e2e-minion-group-pjt7","csi-mock-csi-mock-volumes-8391":"bootstrap-e2e-minion-group-pjt7"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:13:49 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-26 00:25:43 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status} {node-problem-detector Update v1 2022-11-26 00:25:43 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-pjt7,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:25:43 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.105.124.11,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:f7ade85e43e2873500c8f33f09edf4a9,SystemUUID:f7ade85e-43e2-8735-00c8-f33f09edf4a9,BootID:07ab1c04-9bf6-4a67-bfa8-8d3160253b07,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9 kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6,DevicePath:,},},Config:nil,},} Nov 26 00:28:40.545: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:28:40.590: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-pjt7 Nov 26 00:28:40.641: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-pjt7: error trying to reach service: No agent available [DeferCleanup (Each)] [sig-network] LoadBalancers tear down framework | framework.go:193 STEP: Destroying namespace "loadbalancers-95" for this suite. 11/26/22 00:28:40.641
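A note on the repeated "Unable to retrieve kubelet pods for node ...: error trying to reach service: No agent available" lines above: the diagnostics dumper fetches each node's pods through the API server's node proxy subresource, which on this cluster is tunneled through the Konnectivity server; with no Konnectivity agent connected, every proxy request fails with that message. The sketch below shows the general shape of such a fetch with client-go. It is illustrative only, assuming a library package, the function name KubeletPods, and plain JSON decoding; it is not the e2e framework's exact code.

package nodeproxy

import (
	"context"
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// KubeletPods lists the pods a node's kubelet reports, via
// GET /api/v1/nodes/<node>/proxy/pods. When the apiserver cannot reach
// the node (e.g. no Konnectivity agent is available), the request fails
// with "error trying to reach service: No agent available".
func KubeletPods(ctx context.Context, cs kubernetes.Interface, node string) (*v1.PodList, error) {
	raw, err := cs.CoreV1().RESTClient().Get().
		Resource("nodes").
		Name(node).
		SubResource("proxy").
		Suffix("pods").
		Do(ctx).Raw()
	if err != nil {
		return nil, fmt.Errorf("retrieve kubelet pods for node %s: %w", node, err)
	}
	pods := &v1.PodList{}
	if err := json.Unmarshal(raw, pods); err != nil {
		return nil, err
	}
	return pods, nil
}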
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\sbe\sable\sto\sswitch\ssession\saffinity\sfor\sLoadBalancer\sservice\swith\sESIPP\soff\s\[Slow\]\s\[LinuxOnly\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000ca24b0) test/e2e/framework/framework.go:241 +0x96f There were additional failures detected after the initial failure: [PANICKED] Test Panicked In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260 runtime error: invalid memory address or nil pointer dereference Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func19.2() test/e2e/network/loadbalancer.go:73 +0x113
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:02:57.13 Nov 26 00:02:57.130: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/26/22 00:02:57.132 Nov 26 00:02:57.172: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:02:59.211: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:01.212: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:03.212: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:05.213: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:07.212: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:09.212: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:11.212: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:13.211: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:15.213: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:17.211: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:19.211: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:21.212: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:23.212: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:25.212: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:27.211: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:27.251: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:27.251: INFO: Unexpected error: <*errors.errorString | 0xc000207ce0>: { s: "timed out waiting for the condition", } Nov 26 00:03:27.251: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000ca24b0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers test/e2e/framework/node/init/init.go:32 Nov 26 00:03:27.251: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:71 [DeferCleanup (Each)] [sig-network] LoadBalancers dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:03:27.292 [DeferCleanup (Each)] [sig-network] LoadBalancers tear down framework | framework.go:193
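The long run of "Unexpected error while creating namespace: ... connect: connection refused" lines above is the framework polling namespace creation roughly every two seconds until the apiserver comes back or the wait times out. The sketch below reproduces that retry shape with client-go's wait helpers; the interval, timeout, package name, and function name are assumptions for illustration, not the framework's actual values or code.

package e2eretry

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// CreateNamespaceWithRetry keeps trying to create a namespace, logging
// and swallowing transient apiserver errors ("connection refused"),
// until the create succeeds or the overall wait times out with
// "timed out waiting for the condition".
func CreateNamespaceWithRetry(ctx context.Context, cs kubernetes.Interface, base string) (*v1.Namespace, error) {
	var got *v1.Namespace
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		ns, err := cs.CoreV1().Namespaces().Create(ctx, &v1.Namespace{
			ObjectMeta: metav1.ObjectMeta{GenerateName: base + "-"},
		}, metav1.CreateOptions{})
		if err != nil {
			// Transient failures are logged and retried, producing the
			// repeated lines seen in the log above.
			fmt.Printf("Unexpected error while creating namespace: %v\n", err)
			return false, nil
		}
		got = ns
		return true, nil
	})
	return got, err
}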
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\shandle\sload\sbalancer\scleanup\sfinalizer\sfor\sservice\s\[Slow\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc001327b30) test/e2e/framework/framework.go:241 +0x96f There were additional failures detected after the initial failure: [PANICKED] Test Panicked In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260 runtime error: invalid memory address or nil pointer dereference Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func19.2() test/e2e/network/loadbalancer.go:73 +0x113
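The "[PANICKED] ... invalid memory address or nil pointer dereference" reported in [AfterEach] at loadbalancer.go:73 is a follow-on failure, not an independent one: BeforeEach aborted before the framework's clientset was initialized, so the cleanup hook dereferences a nil client. The sketch below illustrates that pattern and a defensive guard; the struct, names, and guard are assumptions for illustration, and this is not the actual code at test/e2e/network/loadbalancer.go:73.

package e2ecleanup

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
)

// Framework stands in for the e2e test framework struct; only the field
// relevant to the panic is shown.
type Framework struct {
	ClientSet kubernetes.Interface
}

// AfterEach sketches the cleanup hook. If BeforeEach failed before the
// client was constructed, ClientSet is nil and any unguarded method call
// on it panics with "invalid memory address or nil pointer dereference",
// which ginkgo then reports as the secondary [PANICKED] failure.
func AfterEach(f *Framework) {
	if f.ClientSet == nil { // guard avoids the secondary panic
		fmt.Println("skipping load balancer cleanup: framework never initialized")
		return
	}
	// ... inspect and clean up services via f.ClientSet.CoreV1().Services(...) ...
}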
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:15:32.676 Nov 26 00:15:32.676: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/26/22 00:15:32.678 Nov 26 00:15:32.718: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:34.757: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:36.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:38.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:40.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:42.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:44.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:46.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:48.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:50.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:52.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:54.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:56.758: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:18:02.060: INFO: Unexpected error: <*fmt.wrapError | 0xc00481e000>: { msg: "wait for service account \"default\" in namespace \"loadbalancers-2873\": timed out waiting for the condition", err: <*errors.errorString | 0xc0001c99e0>{ s: "timed out waiting for the condition", }, } Nov 26 00:18:02.060: FAIL: wait for service account "default" in namespace "loadbalancers-2873": timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc001327b30) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers test/e2e/framework/node/init/init.go:32 Nov 26 00:18:02.061: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:71 [DeferCleanup (Each)] [sig-network] LoadBalancers dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:18:02.148 STEP: Collecting events from 
namespace "loadbalancers-2873". 11/26/22 00:18:02.149 STEP: Found 0 events. 11/26/22 00:18:02.189 Nov 26 00:18:02.232: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 00:18:02.232: INFO: Nov 26 00:18:02.280: INFO: Logging node info for node bootstrap-e2e-master Nov 26 00:18:02.321: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master fd5fd34c-05e8-4c7e-8cbe-bf91f0f95cea 9334 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 00:17:13 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 
0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.120.117,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:4d77311e15d4bb4a15d85de5a36cea94,SystemUUID:4d77311e-15d4-bb4a-15d8-5de5a36cea94,BootID:80daeaca-84b8-4927-98e9-a38242975836,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:124989749,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:57659704,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c 
registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}
Nov 26 00:18:02.321: INFO: Logging kubelet events for node bootstrap-e2e-master
Nov 26 00:18:02.431: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master
Nov 26 00:18:02.733: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:02.733: INFO: Container kube-addon-manager ready: true, restart count 1
Nov 26 00:18:02.733: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:02.733: INFO: Container l7-lb-controller ready: true, restart count 7
Nov 26 00:18:02.733: INFO: metadata-proxy-v0.1-thx76 started at 2022-11-25 23:56:34 +0000 UTC (0+2 container statuses recorded)
Nov 26 00:18:02.733: INFO: Container metadata-proxy ready: true, restart count 0
Nov 26 00:18:02.733: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0
Nov 26 00:18:02.733: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:02.733: INFO: Container kube-controller-manager ready: false, restart count 6
Nov 26 00:18:02.733: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:02.733: INFO: Container etcd-container ready: true, restart count 2
Nov 26 00:18:02.733: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:02.733: INFO: Container etcd-container ready: true, restart count 2
Nov 26 00:18:02.733: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:02.733: INFO: Container konnectivity-server-container ready: true, restart count 1
Nov 26 00:18:02.733: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:02.733: INFO: Container kube-scheduler ready: true, restart count 6
Nov 26 00:18:02.733: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:02.733: INFO: Container kube-apiserver ready: true, restart count 3
Nov 26 00:18:03.435: INFO: Latency metrics for node bootstrap-e2e-master
Nov 26 00:18:03.435: INFO: Logging node info for node bootstrap-e2e-minion-group-4434
Nov 26 00:18:03.529: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-4434 1aba3539-104b-4667-ab07-196915781437 9403 0 2022-11-25 23:56:41 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-4434 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2
topology.hostpath.csi/node:bootstrap-e2e-minion-group-4434 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1058":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2121":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2486":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-9066":"bootstrap-e2e-minion-group-4434","csi-hostpath-provisioning-985":"bootstrap-e2e-minion-group-4434","csi-mock-csi-mock-volumes-2299":"bootstrap-e2e-minion-group-4434"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:08:33 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:16:47 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:17:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-4434,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:13:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.8.98,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:e4112b9ebf318dd47967311e73935166,SystemUUID:e4112b9e-bf31-8dd4-7967-311e73935166,BootID:519ea9fb-1f7c-420e-8cea-cf36b5a7caca,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},},Config:nil,},}
Nov 26 00:18:03.530: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-4434
Nov 26 00:18:03.771: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-4434
Nov 26 00:18:05.342: INFO: konnectivity-agent-9h6nk started at 2022-11-25 23:56:50 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container konnectivity-agent ready: false, restart count 7
Nov 26 00:18:05.342: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:29 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container csi-attacher ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container csi-provisioner ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container csi-resizer ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container csi-snapshotter ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container hostpath ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container liveness-probe ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container node-driver-registrar ready: false, restart count 6
Nov 26 00:18:05.342: INFO: pod-secrets-b416252b-41f0-47a8-a1f1-2904f5649ea7 started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container creates-volume-test ready: false, restart count 0
Nov 26 00:18:05.342: INFO: metadata-proxy-v0.1-kdtvq started at 2022-11-25 23:56:42 +0000 UTC (0+2 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container metadata-proxy ready: true, restart count 0
Nov 26 00:18:05.342: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0
Nov 26 00:18:05.342: INFO: pod-570aaca2-5565-4c62-89d3-a199c7b4ebbb started at 2022-11-25 23:58:27 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container write-pod ready: false, restart count 0
Nov 26 00:18:05.342: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:02:52 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container csi-attacher ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container csi-provisioner ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container csi-resizer ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container csi-snapshotter ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container hostpath ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container liveness-probe ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container node-driver-registrar ready: true, restart count 3
Nov 26 00:18:05.342: INFO: test-hostpath-type-cstjx started at 2022-11-26 00:13:15 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container host-path-testing ready: false, restart count 0
Nov 26 00:18:05.342: INFO: test-hostpath-type-966d7 started at 2022-11-26 00:13:19 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container host-path-testing ready: false, restart count 0
Nov 26 00:18:05.342: INFO: hostexec-bootstrap-e2e-minion-group-4434-9kcrr started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container agnhost-container ready: false, restart count 6
Nov 26 00:18:05.342: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:29 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container csi-attacher ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container csi-provisioner ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container csi-resizer ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container csi-snapshotter ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container hostpath ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container liveness-probe ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container node-driver-registrar ready: true, restart count 4
Nov 26 00:18:05.342: INFO: netserver-0 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container webserver ready: true, restart count 4
Nov 26 00:18:05.342: INFO: pod-e24536f7-0c3d-44a2-ab47-cf68d9a28e12 started at 2022-11-26 00:04:40 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container write-pod ready: false, restart count 0
Nov 26 00:18:05.342: INFO: nfs-server started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container nfs-server ready: true, restart count 2
Nov 26 00:18:05.342: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container csi-attacher ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container csi-provisioner ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container csi-resizer ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container csi-snapshotter ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container hostpath ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container liveness-probe ready: true, restart count 3
Nov 26 00:18:05.342: INFO: Container node-driver-registrar ready: true, restart count 3
Nov 26 00:18:05.342: INFO: csi-mockplugin-0 started at 2022-11-25 23:59:02 +0000 UTC (0+3 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container csi-provisioner ready: true, restart count 6
Nov 26 00:18:05.342: INFO: Container driver-registrar ready: true, restart count 6
Nov 26 00:18:05.342: INFO: Container mock ready: true, restart count 6
Nov 26 00:18:05.342: INFO: ss-1 started at 2022-11-26 00:01:13 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container webserver ready: true, restart count 7
Nov 26 00:18:05.342: INFO: kube-proxy-bootstrap-e2e-minion-group-4434 started at 2022-11-25 23:56:41 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container kube-proxy ready: false, restart count 7
Nov 26 00:18:05.342: INFO: netserver-0 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container webserver ready: false, restart count 7
Nov 26 00:18:05.342: INFO: pod-93ad783f-bd8c-43cd-b936-dc278433c338 started at 2022-11-26 00:04:42 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container write-pod ready: false, restart count 0
Nov 26 00:18:05.342: INFO: hostexec-bootstrap-e2e-minion-group-4434-4ctv8 started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container agnhost-container ready: true, restart count 0
Nov 26 00:18:05.342: INFO: pvc-tester-hjwtq started at 2022-11-26 00:04:35 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container write-pod ready: false, restart count 0
Nov 26 00:18:05.342: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:30 +0000 UTC (0+4 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container busybox ready: false, restart count 5
Nov 26 00:18:05.342: INFO: Container csi-provisioner ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container driver-registrar ready: true, restart count 6
Nov 26 00:18:05.342: INFO: Container mock ready: true, restart count 6
Nov 26 00:18:05.342: INFO: hostexec-bootstrap-e2e-minion-group-4434-x8nd2 started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container agnhost-container ready: true, restart count 1
Nov 26 00:18:05.342: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:14 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container csi-attacher ready: true, restart count 8
Nov 26 00:18:05.342: INFO: Container csi-provisioner ready: true, restart count 8
Nov 26 00:18:05.342: INFO: Container csi-resizer ready: true, restart count 8
Nov 26 00:18:05.342: INFO: Container csi-snapshotter ready: true, restart count 8
Nov 26 00:18:05.342: INFO: Container hostpath ready: true, restart count 8
Nov 26 00:18:05.342: INFO: Container liveness-probe ready: true, restart count 8
Nov 26 00:18:05.342: INFO: Container node-driver-registrar ready: true, restart count 8
Nov 26 00:18:05.342: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:59:16 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:05.342: INFO: Container csi-attacher ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container csi-provisioner ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container csi-resizer ready: false, restart count 6
Nov 26 00:18:05.342: INFO: Container csi-snapshotter ready: false, restart
count 6 Nov 26 00:18:05.342: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:18:05.342: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 00:18:05.342: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:18:06.490: INFO: Latency metrics for node bootstrap-e2e-minion-group-4434 Nov 26 00:18:06.490: INFO: Logging node info for node bootstrap-e2e-minion-group-51gr Nov 26 00:18:06.534: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-51gr 739f23c9-858a-495c-bf21-9f7320b53ec4 9450 0 2022-11-25 23:56:31 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-51gr kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-51gr topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-5859":"bootstrap-e2e-minion-group-51gr","csi-hostpath-multivolume-6045":"bootstrap-e2e-minion-group-51gr","csi-hostpath-multivolume-855":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumeio-1998":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumemode-9682":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumemode-9999":"bootstrap-e2e-minion-group-51gr","csi-mock-csi-mock-volumes-2541":"bootstrap-e2e-minion-group-51gr"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:31 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 00:13:55 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:16:38 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:18:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-51gr,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning 
properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:39 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:33 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.82.95.192,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:c1fa84483022b650834cff54e6b41aff,SystemUUID:c1fa8448-3022-b650-834c-ff54e6b41aff,BootID:3164b9a2-246e-435e-be83-42c92c567f8b,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a 
registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-6045^2bdc44ff-6d1f-11ed-96fb-560e2640bdfc,DevicePath:,},},Config:nil,},}
Nov 26 00:18:06.535: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-51gr
Nov 26 00:18:06.588: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-51gr
Nov 26 00:18:06.705: INFO: coredns-6d97d5ddb-7cmct started at 2022-11-25 23:56:46 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container coredns ready: false, restart count 8
Nov 26 00:18:06.705: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:00:33 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container csi-attacher ready: true, restart count 6
Nov 26 00:18:06.705: INFO: Container csi-provisioner ready: true, restart count 6
Nov 26 00:18:06.705: INFO: Container csi-resizer ready: true, restart count 6
Nov 26 00:18:06.705: INFO: Container csi-snapshotter ready: true, restart count 6
Nov 26 00:18:06.705: INFO: Container hostpath ready: true, restart count 6
Nov 26 00:18:06.705: INFO: Container liveness-probe ready: true, restart count 6
Nov 26 00:18:06.705: INFO: Container node-driver-registrar ready: true, restart count 6
Nov 26 00:18:06.705: INFO: hostexec-bootstrap-e2e-minion-group-51gr-6fnrz started at 2022-11-26 00:13:45 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container agnhost-container ready: true, restart count 2
Nov 26 00:18:06.705: INFO: pod-subpath-test-preprovisionedpv-zqzf started at 2022-11-26 00:04:41 +0000 UTC (1+2 container statuses recorded)
Nov 26 00:18:06.705: INFO: Init container init-volume-preprovisionedpv-zqzf ready: true, restart count 4
Nov 26 00:18:06.705: INFO: Container test-container-subpath-preprovisionedpv-zqzf ready: true, restart count 6
Nov 26 00:18:06.705: INFO: Container test-container-volume-preprovisionedpv-zqzf ready: true, restart count 6
Nov 26 00:18:06.705: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container csi-attacher ready: true, restart count 7
Nov 26 00:18:06.705: INFO: Container csi-provisioner ready: true, restart count 7
Nov 26 00:18:06.705: INFO: Container csi-resizer ready: true, restart count 7
Nov 26 00:18:06.705: INFO: Container csi-snapshotter ready: true, restart count 7
Nov 26 00:18:06.705: INFO: Container hostpath ready: true, restart count 7
Nov 26 00:18:06.705: INFO: Container liveness-probe ready: true, restart count 7
Nov 26 00:18:06.705: INFO: Container node-driver-registrar ready: true, restart count 7
Nov 26 00:18:06.705: INFO: pod-subpath-test-inlinevolume-c42v started at 2022-11-26 00:13:32 +0000 UTC (1+2 container statuses recorded)
Nov 26 00:18:06.705: INFO: Init container init-volume-inlinevolume-c42v ready: true, restart count 1
Nov 26 00:18:06.705: INFO: Container test-container-subpath-inlinevolume-c42v ready: true, restart count 3
Nov 26 00:18:06.705: INFO: Container test-container-volume-inlinevolume-c42v ready: true, restart count 3
Nov 26 00:18:06.705: INFO: hostpath-symlink-prep-provisioning-1590 started at 2022-11-26 00:13:53 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container init-volume-provisioning-1590 ready: false, restart count 0
Nov 26 00:18:06.705: INFO: kube-dns-autoscaler-5f6455f985-7kdrd started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container autoscaler ready: false, restart count 7
Nov 26 00:18:06.705: INFO: netserver-1 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container webserver ready: true, restart count 5
Nov 26 00:18:06.705: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:13:15 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container csi-attacher ready: true, restart count 2
Nov 26 00:18:06.705: INFO: Container csi-provisioner ready: true, restart count 2
Nov 26 00:18:06.705: INFO: Container csi-resizer ready: true, restart count 2
Nov 26 00:18:06.705: INFO: Container csi-snapshotter ready: true, restart count 2
Nov 26 00:18:06.705: INFO: Container hostpath ready: true, restart count 2
Nov 26 00:18:06.705: INFO: Container liveness-probe ready: true, restart count 2
Nov 26 00:18:06.705: INFO: Container node-driver-registrar ready: true, restart count 2
Nov 26 00:18:06.705: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container csi-provisioner ready: false, restart count 5
Nov 26 00:18:06.705: INFO: Container driver-registrar ready: false, restart count 5
Nov 26 00:18:06.705: INFO: Container mock ready: false, restart count 5
Nov 26 00:18:06.705: INFO: csi-mockplugin-attacher-0 started at 2022-11-25 23:58:36 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container csi-attacher ready: true, restart count 3
Nov 26 00:18:06.705: INFO: hostexec-bootstrap-e2e-minion-group-51gr-gncwt started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container agnhost-container ready: true, restart count 4
Nov 26 00:18:06.705: INFO: kube-proxy-bootstrap-e2e-minion-group-51gr started at 2022-11-25 23:56:31 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container kube-proxy ready: false, restart count 6
Nov 26 00:18:06.705: INFO: coredns-6d97d5ddb-6vx5m started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container coredns ready: false, restart count 7
Nov 26 00:18:06.705: INFO: konnectivity-agent-sg59x started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container konnectivity-agent ready: true, restart count 7
Nov 26 00:18:06.705: INFO: csi-mockplugin-0 started at 2022-11-25 23:58:36 +0000 UTC (0+3 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container csi-provisioner ready: false, restart count 7
Nov 26 00:18:06.705: INFO: Container driver-registrar ready: false, restart count 7
Nov 26 00:18:06.705: INFO: Container mock ready: false, restart count 7
Nov 26 00:18:06.705: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:13:42 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container csi-attacher ready: false, restart count 3
Nov 26 00:18:06.705: INFO: Container csi-provisioner ready: false, restart count 3
Nov 26 00:18:06.705: INFO: Container csi-resizer ready: false, restart count 3
Nov 26 00:18:06.705: INFO: Container csi-snapshotter ready: false, restart count 3
Nov 26 00:18:06.705: INFO: Container hostpath ready: false, restart count 3
Nov 26 00:18:06.705: INFO: Container liveness-probe ready: false, restart count 3
Nov 26 00:18:06.705: INFO: Container node-driver-registrar ready: false, restart count 3
Nov 26 00:18:06.705: INFO: pod-9dca799b-afc1-4920-8cdb-15687c00da67 started at 2022-11-26 00:13:50 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container write-pod ready: false, restart count 0
Nov 26 00:18:06.705: INFO: metadata-proxy-v0.1-9xnlr started at 2022-11-25 23:56:32 +0000 UTC (0+2 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container metadata-proxy ready: true, restart count 0
Nov 26 00:18:06.705: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0
Nov 26 00:18:06.705: INFO: volume-snapshot-controller-0 started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container volume-snapshot-controller ready: false, restart count 5
Nov 26 00:18:06.705: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:29 +0000 UTC (0+4 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container busybox ready: true, restart count 6
Nov 26 00:18:06.705: INFO: Container csi-provisioner ready: true, restart count 5
Nov 26 00:18:06.705: INFO: Container driver-registrar ready: false, restart count 6
Nov 26 00:18:06.705: INFO: Container mock ready: false, restart count 6
Nov 26 00:18:06.705: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:00 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container csi-attacher ready: true, restart count 5
Nov 26 00:18:06.705: INFO: Container csi-provisioner ready: true, restart count 5
Nov 26 00:18:06.705: INFO: Container csi-resizer ready: true, restart count 5
Nov 26 00:18:06.705: INFO: Container csi-snapshotter ready: true, restart count 5
Nov 26 00:18:06.705: INFO: Container hostpath ready: true, restart count 5
Nov 26 00:18:06.705: INFO: Container liveness-probe ready: true, restart count 5
Nov 26 00:18:06.705: INFO: Container node-driver-registrar ready: true, restart count 5
Nov 26 00:18:06.705: INFO: ss-2 started at 2022-11-26 00:01:15 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container webserver ready: false, restart count 7
Nov 26 00:18:06.705: INFO: l7-default-backend-8549d69d99-97xrr started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container default-http-backend ready: true, restart count 0
Nov 26 00:18:06.705: INFO: netserver-1 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container webserver ready: false, restart count 8
Nov 26 00:18:06.705: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:15 +0000 UTC (0+7 container statuses recorded)
Nov 26 00:18:06.705: INFO: Container csi-attacher ready: true, restart count 8
Nov 26 00:18:06.705: INFO: Container csi-provisioner ready: true, restart count 8
Nov 26 00:18:06.705: INFO: Container csi-resizer ready: true, restart count 8
Nov 26 00:18:06.705: INFO: Container csi-snapshotter ready: true, restart count 8
Nov 26 00:18:06.705: INFO:
Container hostpath ready: true, restart count 8 Nov 26 00:18:06.705: INFO: Container liveness-probe ready: true, restart count 8 Nov 26 00:18:06.705: INFO: Container node-driver-registrar ready: true, restart count 8 Nov 26 00:18:06.705: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.705: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:07.812: INFO: Latency metrics for node bootstrap-e2e-minion-group-51gr Nov 26 00:18:07.812: INFO: Logging node info for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:18:07.855: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-pjt7 5f55dd6b-a4d8-42f3-9e85-83e83c8dc9de 9360 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-pjt7 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-pjt7 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-6349":"bootstrap-e2e-minion-group-pjt7","csi-mock-csi-mock-volumes-8391":"bootstrap-e2e-minion-group-pjt7"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:13:49 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:16:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:17:24 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-pjt7,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no 
deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:49 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:49 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:49 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:13:49 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.105.124.11,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:f7ade85e43e2873500c8f33f09edf4a9,SystemUUID:f7ade85e-43e2-8735-00c8-f33f09edf4a9,BootID:07ab1c04-9bf6-4a67-bfa8-8d3160253b07,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 
registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9 kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6,DevicePath:,},},Config:nil,},} Nov 26 00:18:07.856: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:18:07.947: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-pjt7 Nov 26 00:18:08.121: INFO: pod-subpath-test-preprovisionedpv-92c8 started at 2022-11-26 00:02:55 +0000 UTC (1+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Init container init-volume-preprovisionedpv-92c8 ready: true, restart count 0 Nov 26 00:18:08.122: INFO: Container test-container-subpath-preprovisionedpv-92c8 ready: false, restart count 0 Nov 26 00:18:08.122: INFO: test-hostpath-type-x56nj started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-sh-testing ready: false, restart count 0 Nov 26 00:18:08.122: INFO: hostpath-injector started at 2022-11-25 23:58:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container hostpath-injector ready: false, restart count 0 Nov 26 00:18:08.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-f89gp started at 2022-11-26 00:13:22 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:18:08.122: INFO: external-provisioner-p6q4d started at 2022-11-26 00:04:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container nfs-provisioner ready: true, restart count 6 Nov 26 00:18:08.122: INFO: test-hostpath-type-qw7ws started at 2022-11-26 00:13:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-sh-testing ready: true, restart count 0 Nov 26 00:18:08.122: INFO: var-expansion-5d5e62ea-c0e8-4fb3-be3d-1c786f246364 started at 2022-11-26 00:01:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container dapi-container ready: false, restart count 0 Nov 26 00:18:08.122: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:08.122: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 00:18:08.122: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:18:08.122: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 00:18:08.122: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 00:18:08.122: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:18:08.122: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 00:18:08.122: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:18:08.122: INFO: test-hostpath-type-whtq5 started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:08.122: INFO: pod-configmaps-39454904-1ea1-4326-806f-d840f1ec6aab started at 
2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:18:08.122: INFO: pvc-volume-tester-wmxdq started at 2022-11-26 00:04:47 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:18:08.122: INFO: pod-subpath-test-inlinevolume-xjdn started at 2022-11-26 00:04:26 +0000 UTC (1+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Init container init-volume-inlinevolume-xjdn ready: true, restart count 0 Nov 26 00:18:08.122: INFO: Container test-container-subpath-inlinevolume-xjdn ready: false, restart count 0 Nov 26 00:18:08.122: INFO: test-hostpath-type-gjrf5 started at 2022-11-26 00:09:39 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:08.122: INFO: pod-configmaps-283e1a65-2a1e-4f8e-9383-eeee204154b1 started at 2022-11-25 23:58:30 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:18:08.122: INFO: pod-21071e9a-af84-46e8-af96-a6a6561cb020 started at 2022-11-26 00:13:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:08.122: INFO: test-hostpath-type-lx6tk started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:08.122: INFO: test-hostpath-type-245dt started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:08.122: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container csi-attacher ready: false, restart count 4 Nov 26 00:18:08.122: INFO: csi-mockplugin-0 started at 2022-11-26 00:08:03 +0000 UTC (0+4 container statuses recorded) Nov 26 00:18:08.122: INFO: Container busybox ready: false, restart count 3 Nov 26 00:18:08.122: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:18:08.122: INFO: Container driver-registrar ready: true, restart count 5 Nov 26 00:18:08.122: INFO: Container mock ready: true, restart count 5 Nov 26 00:18:08.122: INFO: nfs-server started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container nfs-server ready: true, restart count 5 Nov 26 00:18:08.122: INFO: metadata-proxy-v0.1-9jgjn started at 2022-11-25 23:56:35 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:08.122: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:18:08.122: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:18:08.122: INFO: kube-proxy-bootstrap-e2e-minion-group-pjt7 started at 2022-11-25 23:56:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container kube-proxy ready: false, restart count 7 Nov 26 00:18:08.122: INFO: pod-961df2a5-8218-4399-a758-55d8b52b3564 started at 2022-11-26 00:13:33 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:08.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-9qlmb started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: 
Container agnhost-container ready: true, restart count 1 Nov 26 00:18:08.122: INFO: test-hostpath-type-n5z6m started at 2022-11-26 00:04:48 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:08.122: INFO: pod-subpath-test-preprovisionedpv-jmbn started at 2022-11-26 00:13:46 +0000 UTC (1+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Init container init-volume-preprovisionedpv-jmbn ready: true, restart count 0 Nov 26 00:18:08.122: INFO: Container test-container-subpath-preprovisionedpv-jmbn ready: false, restart count 0 Nov 26 00:18:08.122: INFO: pod-secrets-890a9a5b-57be-471c-8757-4aad820ed6d0 started at 2022-11-25 23:58:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:18:08.122: INFO: netserver-2 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container webserver ready: true, restart count 3 Nov 26 00:18:08.122: INFO: test-hostpath-type-9nghx started at 2022-11-26 00:04:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:08.122: INFO: forbid-27823693-tbmqx started at 2022-11-26 00:13:14 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container c ready: true, restart count 1 Nov 26 00:18:08.122: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:48 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:08.122: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 00:18:08.122: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 00:18:08.122: INFO: Container csi-resizer ready: true, restart count 4 Nov 26 00:18:08.122: INFO: Container csi-snapshotter ready: true, restart count 4 Nov 26 00:18:08.122: INFO: Container hostpath ready: true, restart count 4 Nov 26 00:18:08.122: INFO: Container liveness-probe ready: true, restart count 4 Nov 26 00:18:08.122: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 00:18:08.122: INFO: pod-back-off-image started at 2022-11-26 00:00:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container back-off ready: false, restart count 8 Nov 26 00:18:08.122: INFO: pod-configmaps-607fb46f-a546-474e-99da-bccf05cace4e started at 2022-11-25 23:59:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:18:08.122: INFO: inclusterclient started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container inclusterclient ready: false, restart count 0 Nov 26 00:18:08.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-8b49h started at 2022-11-26 00:13:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:18:08.122: INFO: test-hostpath-type-nxcg6 started at 2022-11-26 00:13:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-sh-testing ready: true, restart count 0 Nov 26 00:18:08.122: INFO: external-provisioner-v86lp started at 2022-11-26 00:13:16 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container nfs-provisioner ready: true, restart count 0 Nov 26 00:18:08.122: INFO: metrics-server-v0.5.2-867b8754b9-c8h52 started at 2022-11-25 23:57:03 +0000 UTC (0+2 
container statuses recorded) Nov 26 00:18:08.122: INFO: Container metrics-server ready: false, restart count 7 Nov 26 00:18:08.122: INFO: Container metrics-server-nanny ready: false, restart count 8 Nov 26 00:18:08.122: INFO: external-local-nodeport-dhpjs started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container netexec ready: true, restart count 5 Nov 26 00:18:08.122: INFO: pod-cc7edce3-35cc-4f45-bad6-a784001395c6 started at 2022-11-26 00:00:17 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:08.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-wcqzb started at 2022-11-26 00:13:16 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:18:08.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-9r9pc started at 2022-11-26 00:13:18 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container agnhost-container ready: true, restart count 4 Nov 26 00:18:08.122: INFO: external-provisioner-994ds started at 2022-11-26 00:13:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container nfs-provisioner ready: true, restart count 5 Nov 26 00:18:08.122: INFO: ss-0 started at 2022-11-25 23:59:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container webserver ready: false, restart count 7 Nov 26 00:18:08.122: INFO: netserver-2 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container webserver ready: true, restart count 7 Nov 26 00:18:08.122: INFO: test-hostpath-type-mpdjn started at 2022-11-26 00:13:16 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container host-path-testing ready: true, restart count 0 Nov 26 00:18:08.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-rkscb started at 2022-11-26 00:13:17 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:18:08.122: INFO: konnectivity-agent-ft6wq started at 2022-11-25 23:56:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container konnectivity-agent ready: true, restart count 7 Nov 26 00:18:08.122: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded) Nov 26 00:18:08.122: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:18:08.122: INFO: Container driver-registrar ready: true, restart count 3 Nov 26 00:18:08.122: INFO: Container mock ready: true, restart count 3 Nov 26 00:18:08.122: INFO: pod-b48ba142-297b-4e60-b176-18111763e211 started at 2022-11-26 00:13:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:08.122: INFO: back-off-cap started at 2022-11-26 00:13:18 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container back-off-cap ready: false, restart count 5 Nov 26 00:18:08.122: INFO: lb-internal-lvsqm started at 2022-11-26 00:13:32 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container netexec ready: true, restart count 0 Nov 26 00:18:08.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-wfjcd started at 2022-11-26 00:02:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:08.122: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:18:08.512: 
INFO: Latency metrics for node bootstrap-e2e-minion-group-pjt7 [DeferCleanup (Each)] [sig-network] LoadBalancers tear down framework | framework.go:193 STEP: Destroying namespace "loadbalancers-2873" for this suite. 11/26/22 00:18:08.512
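The dump above is largely built from per-node pod listings ("Logging pods the kubelet thinks is on node ..."), i.e. a pod list filtered by the node a pod is bound to. As a minimal sketch (not the e2e framework's own helper), the same view can be reproduced with client-go and a `spec.nodeName` field selector; the kubeconfig path and the node name (taken from the log above) are assumptions:

```go
// Sketch: list pods scheduled on one node, the raw material for the
// "Logging pods the kubelet thinks is on node ..." lines above.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes the conventional ~/.kube/config location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	// The field selector restricts the list to pods bound to this node;
	// namespace "" means all namespaces.
	pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
		FieldSelector: "spec.nodeName=bootstrap-e2e-minion-group-pjt7",
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		for _, cs := range p.Status.ContainerStatuses {
			fmt.Printf("%s/%s container %s ready=%v restarts=%d\n",
				p.Namespace, p.Name, cs.Name, cs.Ready, cs.RestartCount)
		}
	}
}
```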
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\shave\ssession\saffinity\swork\sfor\sLoadBalancer\sservice\swith\sESIPP\soff\s\[Slow\]\s\[LinuxOnly\]$'
test/e2e/network/service.go:3978 k8s.io/kubernetes/test/e2e/network.execAffinityTestForLBServiceWithOptionalTransition(0x75cdc0f?, {0x801de88, 0xc0014fc4e0}, 0xc0008a9900, 0x0) test/e2e/network/service.go:3978 +0x1b1 k8s.io/kubernetes/test/e2e/network.execAffinityTestForLBService(...) test/e2e/network/service.go:3966 k8s.io/kubernetes/test/e2e/network.glob..func19.10() test/e2e/network/loadbalancer.go:798 +0xf0
from junit_01.xml
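This test exercises a LoadBalancer Service with ClientIP session affinity; the `kubectl describe svc` output in the log below confirms the shape (Type: LoadBalancer, Session Affinity: ClientIP, Port 80 -> TargetPort 9376). As a hedged illustration of that Service object with client-go (not the e2e framework's exact construction; the name and namespace are copied from the log):

```go
// Sketch: the Service shape the session-affinity test creates, per the
// describe output below. With SessionAffinity: ClientIP, kube-proxy keeps
// routing requests from a given client IP to the same backend pod.
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "affinity-lb"},
		Spec: corev1.ServiceSpec{
			Type:            corev1.ServiceTypeLoadBalancer,
			SessionAffinity: corev1.ServiceAffinityClientIP,
			Selector:        map[string]string{"name": "affinity-lb"},
			Ports: []corev1.ServicePort{{
				Port:       80,                   // service port, per the describe output
				TargetPort: intstr.FromInt(9376), // agnhost serve port, per the pod log
			}},
		},
	}
	// Namespace taken from the failing run; any test namespace would do.
	if _, err := clientset.CoreV1().Services("loadbalancers-4313").Create(
		context.TODO(), svc, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```

The run below fails before the affinity check itself: one of the three `affinity-lb` replicas restarts during creation ("1 containers failed which is more than allowed 0"), so the RC-with-service setup step aborts the test.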
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/25/22 23:59:54.919 Nov 25 23:59:54.919: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/25/22 23:59:54.92 STEP: Waiting for a default service account to be provisioned in namespace 11/25/22 23:59:55.184 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/25/22 23:59:55.279 [BeforeEach] [sig-network] LoadBalancers test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:65 [It] should have session affinity work for LoadBalancer service with ESIPP off [Slow] [LinuxOnly] test/e2e/network/loadbalancer.go:791 STEP: creating service in namespace loadbalancers-4313 11/25/22 23:59:55.45 STEP: creating service affinity-lb in namespace loadbalancers-4313 11/25/22 23:59:55.45 STEP: creating replication controller affinity-lb in namespace loadbalancers-4313 11/25/22 23:59:55.585 I1125 23:59:55.687773 8234 runners.go:193] Created replication controller with name: affinity-lb, namespace: loadbalancers-4313, replica count: 3 I1125 23:59:58.789469 8234 runners.go:193] affinity-lb Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I1126 00:00:01.790024 8234 runners.go:193] affinity-lb Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I1126 00:00:01.790046 8234 runners.go:193] Logging node info for node bootstrap-e2e-minion-group-pjt7 I1126 00:00:01.855089 8234 runners.go:193] Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-pjt7 5f55dd6b-a4d8-42f3-9e85-83e83c8dc9de 2917 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-pjt7 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-pjt7 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-7328":"bootstrap-e2e-minion-group-pjt7"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {node-problem-detector Update v1 2022-11-25 23:56:39 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-25 23:58:51 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-25 23:59:37 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-pjt7,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not 
read-only,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-25 23:59:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-25 23:59:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-25 23:59:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-25 23:59:37 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.105.124.11,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:f7ade85e43e2873500c8f33f09edf4a9,SystemUUID:f7ade85e-43e2-8735-00c8-f33f09edf4a9,BootID:07ab1c04-9bf6-4a67-bfa8-8d3160253b07,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9,DevicePath:,},},Config:nil,},} I1126 00:00:01.855516 8234 runners.go:193] Logging kubelet events for node bootstrap-e2e-minion-group-pjt7 I1126 00:00:01.921807 8234 runners.go:193] Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-pjt7 I1126 00:00:02.052658 8234 runners.go:193] volume-prep-provisioning-7623 started at 2022-11-25 23:59:58 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052684 8234 runners.go:193] Container init-volume-provisioning-7623 ready: false, restart count 0 I1126 00:00:02.052688 8234 runners.go:193] ss-0 started at 2022-11-25 23:58:53 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052692 8234 runners.go:193] Container webserver ready: false, restart count 2 I1126 00:00:02.052695 8234 runners.go:193] ss-0 started at 2022-11-25 23:59:51 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052698 8234 runners.go:193] Container webserver ready: false, restart count 1 I1126 00:00:02.052701 8234 runners.go:193] external-provisioner-97ddc started at 2022-11-25 23:59:32 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052704 8234 runners.go:193] Container nfs-provisioner ready: true, restart count 2 I1126 00:00:02.052707 8234 runners.go:193] affinity-lb-vbnzr started at 2022-11-25 23:59:55 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052709 8234 runners.go:193] Container affinity-lb ready: true, restart count 1 I1126 00:00:02.052712 8234 runners.go:193] hostpath-injector started at 2022-11-25 23:58:51 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052715 8234 runners.go:193] Container hostpath-injector ready: false, restart count 0 I1126 00:00:02.052717 8234 runners.go:193] konnectivity-agent-ft6wq started at 2022-11-25 23:56:51 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052720 8234 
runners.go:193] Container konnectivity-agent ready: false, restart count 2 I1126 00:00:02.052722 8234 runners.go:193] netserver-2 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052725 8234 runners.go:193] Container webserver ready: false, restart count 3 I1126 00:00:02.052728 8234 runners.go:193] pod-configmaps-283e1a65-2a1e-4f8e-9383-eeee204154b1 started at 2022-11-25 23:58:30 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052731 8234 runners.go:193] Container agnhost-container ready: false, restart count 0 I1126 00:00:02.052733 8234 runners.go:193] pod-secrets-890a9a5b-57be-471c-8757-4aad820ed6d0 started at 2022-11-25 23:58:34 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052739 8234 runners.go:193] Container creates-volume-test ready: false, restart count 0 I1126 00:00:02.052742 8234 runners.go:193] nfs-server started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052745 8234 runners.go:193] Container nfs-server ready: true, restart count 2 I1126 00:00:02.052747 8234 runners.go:193] pod-configmaps-39454904-1ea1-4326-806f-d840f1ec6aab started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052750 8234 runners.go:193] Container agnhost-container ready: false, restart count 0 I1126 00:00:02.052752 8234 runners.go:193] csi-hostpathplugin-0 started at 2022-11-25 23:58:33 +0000 UTC (0+7 container statuses recorded) I1126 00:00:02.052755 8234 runners.go:193] Container csi-attacher ready: true, restart count 1 I1126 00:00:02.052758 8234 runners.go:193] Container csi-provisioner ready: true, restart count 1 I1126 00:00:02.052760 8234 runners.go:193] Container csi-resizer ready: true, restart count 1 I1126 00:00:02.052762 8234 runners.go:193] Container csi-snapshotter ready: true, restart count 1 I1126 00:00:02.052764 8234 runners.go:193] Container hostpath ready: true, restart count 1 I1126 00:00:02.052766 8234 runners.go:193] Container liveness-probe ready: true, restart count 1 I1126 00:00:02.052768 8234 runners.go:193] Container node-driver-registrar ready: true, restart count 1 I1126 00:00:02.052770 8234 runners.go:193] pod-configmaps-607fb46f-a546-474e-99da-bccf05cace4e started at 2022-11-25 23:59:28 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052773 8234 runners.go:193] Container agnhost-container ready: false, restart count 0 I1126 00:00:02.052776 8234 runners.go:193] local-volume-statefulset-0 started at 2022-11-25 23:59:49 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052779 8234 runners.go:193] Container nginx ready: true, restart count 0 I1126 00:00:02.052781 8234 runners.go:193] kube-proxy-bootstrap-e2e-minion-group-pjt7 started at 2022-11-25 23:56:34 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052784 8234 runners.go:193] Container kube-proxy ready: true, restart count 3 I1126 00:00:02.052786 8234 runners.go:193] metadata-proxy-v0.1-9jgjn started at 2022-11-25 23:56:35 +0000 UTC (0+2 container statuses recorded) I1126 00:00:02.052789 8234 runners.go:193] Container metadata-proxy ready: true, restart count 0 I1126 00:00:02.052791 8234 runners.go:193] Container prometheus-to-sd-exporter ready: true, restart count 0 I1126 00:00:02.052793 8234 runners.go:193] metrics-server-v0.5.2-867b8754b9-c8h52 started at 2022-11-25 23:57:03 +0000 UTC (0+2 container statuses recorded) I1126 00:00:02.052802 8234 runners.go:193] Container metrics-server ready: false, restart count 2 I1126 00:00:02.052804 8234 runners.go:193] 
Container metrics-server-nanny ready: true, restart count 3 I1126 00:00:02.052808 8234 runners.go:193] hostexec-bootstrap-e2e-minion-group-pjt7-vsqtt started at 2022-11-25 23:59:27 +0000 UTC (0+1 container statuses recorded) I1126 00:00:02.052812 8234 runners.go:193] Container agnhost-container ready: true, restart count 0 I1126 00:00:05.454496 8234 runners.go:193] Latency metrics for node bootstrap-e2e-minion-group-pjt7 I1126 00:00:05.510402 8234 runners.go:193] Running kubectl logs on non-ready containers in loadbalancers-4313 Nov 26 00:00:05.572: INFO: Logs of loadbalancers-4313/affinity-lb-vbnzr:affinity-lb on node bootstrap-e2e-minion-group-pjt7 Nov 26 00:00:05.572: INFO: : STARTLOG I1126 00:00:00.982402 1 log.go:198] Serving on port 9376. I1126 00:00:01.409292 1 log.go:198] Shutting down after receiving signal: terminated. I1126 00:00:01.409867 1 log.go:198] Awaiting pod deletion. ENDLOG for container loadbalancers-4313:affinity-lb-vbnzr:affinity-lb Nov 26 00:00:05.572: INFO: Unexpected error: failed to create replication controller with service in the namespace: loadbalancers-4313: <*errors.errorString | 0xc003dc2f30>: { s: "1 containers failed which is more than allowed 0", } Nov 26 00:00:05.572: FAIL: failed to create replication controller with service in the namespace: loadbalancers-4313: 1 containers failed which is more than allowed 0 Full Stack Trace k8s.io/kubernetes/test/e2e/network.execAffinityTestForLBServiceWithOptionalTransition(0x75cdc0f?, {0x801de88, 0xc0014fc4e0}, 0xc0008a9900, 0x0) test/e2e/network/service.go:3978 +0x1b1 k8s.io/kubernetes/test/e2e/network.execAffinityTestForLBService(...) test/e2e/network/service.go:3966 k8s.io/kubernetes/test/e2e/network.glob..func19.10() test/e2e/network/loadbalancer.go:798 +0xf0 [AfterEach] [sig-network] LoadBalancers test/e2e/framework/node/init/init.go:32 Nov 26 00:00:05.573: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:71 Nov 26 00:00:05.632: INFO: Output of kubectl describe svc: Nov 26 00:00:05.632: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.120.117 --kubeconfig=/workspace/.kube/config --namespace=loadbalancers-4313 describe svc --namespace=loadbalancers-4313' Nov 26 00:00:06.038: INFO: stderr: "" Nov 26 00:00:06.038: INFO: stdout: "Name: affinity-lb\nNamespace: loadbalancers-4313\nLabels: <none>\nAnnotations: <none>\nSelector: name=affinity-lb\nType: LoadBalancer\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.71.175\nIPs: 10.0.71.175\nPort: <unset> 80/TCP\nTargetPort: 9376/TCP\nNodePort: <unset> 31836/TCP\nEndpoints: 10.64.0.60:9376,10.64.3.32:9376\nSession Affinity: ClientIP\nExternal Traffic Policy: Cluster\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal EnsuringLoadBalancer 11s service-controller Ensuring load balancer\n" Nov 26 00:00:06.038: INFO: Name: affinity-lb Namespace: loadbalancers-4313 Labels: <none> Annotations: <none> Selector: name=affinity-lb Type: LoadBalancer IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.71.175 IPs: 10.0.71.175 Port: <unset> 80/TCP TargetPort: 9376/TCP NodePort: <unset> 31836/TCP Endpoints: 10.64.0.60:9376,10.64.3.32:9376 Session Affinity: ClientIP External Traffic Policy: Cluster Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal EnsuringLoadBalancer 11s service-controller Ensuring load balancer [DeferCleanup (Each)] [sig-network] LoadBalancers 
test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] LoadBalancers dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:00:06.038 STEP: Collecting events from namespace "loadbalancers-4313". 11/26/22 00:00:06.038 STEP: Found 19 events. 11/26/22 00:00:06.09 Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:55 +0000 UTC - event for affinity-lb: {replication-controller } SuccessfulCreate: Created pod: affinity-lb-7v9bw Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:55 +0000 UTC - event for affinity-lb: {replication-controller } SuccessfulCreate: Created pod: affinity-lb-d6f8b Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:55 +0000 UTC - event for affinity-lb: {replication-controller } SuccessfulCreate: Created pod: affinity-lb-vbnzr Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:55 +0000 UTC - event for affinity-lb: {service-controller } EnsuringLoadBalancer: Ensuring load balancer Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:55 +0000 UTC - event for affinity-lb-7v9bw: {default-scheduler } Scheduled: Successfully assigned loadbalancers-4313/affinity-lb-7v9bw to bootstrap-e2e-minion-group-4434 Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:55 +0000 UTC - event for affinity-lb-d6f8b: {default-scheduler } Scheduled: Successfully assigned loadbalancers-4313/affinity-lb-d6f8b to bootstrap-e2e-minion-group-51gr Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:55 +0000 UTC - event for affinity-lb-vbnzr: {default-scheduler } Scheduled: Successfully assigned loadbalancers-4313/affinity-lb-vbnzr to bootstrap-e2e-minion-group-pjt7 Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:56 +0000 UTC - event for affinity-lb-vbnzr: {kubelet bootstrap-e2e-minion-group-pjt7} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:56 +0000 UTC - event for affinity-lb-vbnzr: {kubelet bootstrap-e2e-minion-group-pjt7} Created: Created container affinity-lb Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:56 +0000 UTC - event for affinity-lb-vbnzr: {kubelet bootstrap-e2e-minion-group-pjt7} Started: Started container affinity-lb Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:57 +0000 UTC - event for affinity-lb-vbnzr: {kubelet bootstrap-e2e-minion-group-pjt7} Killing: Stopping container affinity-lb Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:58 +0000 UTC - event for affinity-lb-7v9bw: {kubelet bootstrap-e2e-minion-group-4434} Started: Started container affinity-lb Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:58 +0000 UTC - event for affinity-lb-7v9bw: {kubelet bootstrap-e2e-minion-group-4434} Created: Created container affinity-lb Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:58 +0000 UTC - event for affinity-lb-7v9bw: {kubelet bootstrap-e2e-minion-group-4434} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:58 +0000 UTC - event for affinity-lb-d6f8b: {kubelet bootstrap-e2e-minion-group-51gr} Started: Started container affinity-lb Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:58 +0000 UTC - event for affinity-lb-d6f8b: {kubelet bootstrap-e2e-minion-group-51gr} Created: Created container affinity-lb Nov 26 00:00:06.090: INFO: At 2022-11-25 23:59:58 +0000 UTC - event for affinity-lb-d6f8b: {kubelet bootstrap-e2e-minion-group-51gr} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 00:00:06.090: INFO: At 2022-11-26 00:00:00 
+0000 UTC - event for affinity-lb-vbnzr: {kubelet bootstrap-e2e-minion-group-pjt7} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Nov 26 00:00:06.090: INFO: At 2022-11-26 00:00:05 +0000 UTC - event for affinity-lb-vbnzr: {kubelet bootstrap-e2e-minion-group-pjt7} BackOff: Back-off restarting failed container affinity-lb in pod affinity-lb-vbnzr_loadbalancers-4313(a9a7abd3-ac3d-44c7-9169-c569a35954a1) Nov 26 00:00:06.142: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 00:00:06.142: INFO: affinity-lb-7v9bw bootstrap-e2e-minion-group-4434 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:55 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:59 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:59 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:55 +0000 UTC }] Nov 26 00:00:06.143: INFO: affinity-lb-d6f8b bootstrap-e2e-minion-group-51gr Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:55 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:58 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:58 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:55 +0000 UTC }] Nov 26 00:00:06.143: INFO: affinity-lb-vbnzr bootstrap-e2e-minion-group-pjt7 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:55 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:00:04 +0000 UTC ContainersNotReady containers with unready status: [affinity-lb]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 00:00:04 +0000 UTC ContainersNotReady containers with unready status: [affinity-lb]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-25 23:59:55 +0000 UTC }] Nov 26 00:00:06.143: INFO: Nov 26 00:00:06.515: INFO: Logging node info for node bootstrap-e2e-master Nov 26 00:00:06.562: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master fd5fd34c-05e8-4c7e-8cbe-bf91f0f95cea 612 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-25 23:56:54 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-25 23:56:54 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-25 23:56:54 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-25 23:56:54 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-25 23:56:54 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.120.117,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:4d77311e15d4bb4a15d85de5a36cea94,SystemUUID:4d77311e-15d4-bb4a-15d8-5de5a36cea94,BootID:80daeaca-84b8-4927-98e9-a38242975836,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:124989749,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:57659704,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 00:00:06.562: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 00:00:06.616: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 00:00:06.750: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:06.750: INFO: Container konnectivity-server-container ready: true, restart count 0 Nov 26 00:00:06.750: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:06.750: INFO: Container kube-addon-manager ready: true, restart count 0 Nov 26 00:00:06.750: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:06.750: INFO: Container l7-lb-controller ready: true, restart count 4 Nov 26 00:00:06.750: 
INFO: metadata-proxy-v0.1-thx76 started at 2022-11-25 23:56:34 +0000 UTC (0+2 container statuses recorded) Nov 26 00:00:06.750: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:00:06.750: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:00:06.750: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:06.750: INFO: Container kube-controller-manager ready: true, restart count 1 Nov 26 00:00:06.750: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:06.750: INFO: Container etcd-container ready: true, restart count 0 Nov 26 00:00:06.750: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:06.750: INFO: Container etcd-container ready: true, restart count 0 Nov 26 00:00:06.750: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:06.750: INFO: Container kube-scheduler ready: false, restart count 0 Nov 26 00:00:06.750: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:06.750: INFO: Container kube-apiserver ready: true, restart count 0 Nov 26 00:00:08.368: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 00:00:08.368: INFO: Logging node info for node bootstrap-e2e-minion-group-4434 Nov 26 00:00:08.454: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-4434 1aba3539-104b-4667-ab07-196915781437 3389 0 2022-11-25 23:56:41 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-4434 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-4434 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1058":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2486":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-406":"bootstrap-e2e-minion-group-4434","csi-hostpath-provisioning-985":"bootstrap-e2e-minion-group-4434","csi-mock-csi-mock-volumes-2299":"bootstrap-e2e-minion-group-4434"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {node-problem-detector Update v1 2022-11-25 23:56:45 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-25 23:59:23 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-26 00:00:08 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-4434,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-25 23:56:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-25 23:56:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-25 23:56:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-25 23:56:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-25 23:59:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-25 23:59:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-25 23:59:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-25 23:59:45 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.8.98,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:e4112b9ebf318dd47967311e73935166,SystemUUID:e4112b9e-bf31-8dd4-7967-311e73935166,BootID:519ea9fb-1f7c-420e-8cea-cf36b5a7caca,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da 
registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e kubernetes.io/csi/csi-hostpath-multivolume-406^2cca5649-6d1d-11ed-9d1c-5eaa1656f316 kubernetes.io/csi/csi-hostpath-multivolume-406^2dd56f7e-6d1d-11ed-9d1c-5eaa1656f316 kubernetes.io/csi/csi-hostpath-provisioning-8288^0c535cb0-6d1d-11ed-b626-ca6f412d6440 kubernetes.io/csi/csi-mock-csi-mock-volumes-2299^32d4ddee-6d1d-11ed-a58e-3ec373b86bf4],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-406^2dd56f7e-6d1d-11ed-9d1c-5eaa1656f316,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-406^2cca5649-6d1d-11ed-9d1c-5eaa1656f316,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-8288^0c535cb0-6d1d-11ed-b626-ca6f412d6440,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},},Config:nil,},} Nov 26 00:00:08.455: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-4434 Nov 26 00:00:08.554: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-4434 Nov 26 00:00:09.330: INFO: hostexec-bootstrap-e2e-minion-group-4434-hvf25 started at 2022-11-25 23:59:48 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:00:09.330: INFO: metadata-proxy-v0.1-kdtvq started at 2022-11-25 23:56:42 +0000 UTC (0+2 container statuses recorded) Nov 26 00:00:09.330: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:00:09.330: INFO: pod-570aaca2-5565-4c62-89d3-a199c7b4ebbb started at 2022-11-25 23:58:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:00:09.330: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:14 +0000 UTC (0+7 container statuses recorded) Nov 26 00:00:09.330: INFO: Container csi-attacher ready: false, restart count 2 Nov 26 00:00:09.330: INFO: Container csi-provisioner ready: false, restart count 2 Nov 26 00:00:09.330: INFO: Container csi-resizer ready: false, restart count 2 Nov 26 00:00:09.330: INFO: Container csi-snapshotter ready: false, restart count 2 Nov 26 00:00:09.330: INFO: Container hostpath ready: false, restart count 2 Nov 26 00:00:09.330: INFO: Container liveness-probe ready: false, restart count 2 Nov 26 00:00:09.330: INFO: Container node-driver-registrar ready: false, restart count 2 Nov 26 00:00:09.330: INFO: pvc-volume-tester-p6sg8 started at 2022-11-25 23:59:31 +0000 UTC (0+1 container statuses 
recorded) Nov 26 00:00:09.330: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:00:09.330: INFO: hostexec-bootstrap-e2e-minion-group-4434-9kcrr started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:00:09.330: INFO: pod-subpath-test-dynamicpv-7nnh started at 2022-11-25 23:58:25 +0000 UTC (1+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Init container init-volume-dynamicpv-7nnh ready: false, restart count 0 Nov 26 00:00:09.330: INFO: Container test-container-subpath-dynamicpv-7nnh ready: false, restart count 0 Nov 26 00:00:09.330: INFO: pod-b4811215-c199-4c9a-82d1-1b9c86b4f43f started at 2022-11-25 23:59:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container write-pod ready: true, restart count 0 Nov 26 00:00:09.330: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:59:16 +0000 UTC (0+7 container statuses recorded) Nov 26 00:00:09.330: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container hostpath ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 00:00:09.330: INFO: affinity-lb-7v9bw started at 2022-11-25 23:59:55 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container affinity-lb ready: true, restart count 0 Nov 26 00:00:09.330: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:00:09.330: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:00:09.330: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:00:09.330: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 00:00:09.330: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 00:00:09.330: INFO: Container hostpath ready: true, restart count 1 Nov 26 00:00:09.330: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 00:00:09.330: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 00:00:09.330: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:14 +0000 UTC (0+7 container statuses recorded) Nov 26 00:00:09.330: INFO: Container csi-attacher ready: false, restart count 1 Nov 26 00:00:09.330: INFO: Container csi-provisioner ready: false, restart count 1 Nov 26 00:00:09.330: INFO: Container csi-resizer ready: false, restart count 1 Nov 26 00:00:09.330: INFO: Container csi-snapshotter ready: false, restart count 1 Nov 26 00:00:09.330: INFO: Container hostpath ready: false, restart count 1 Nov 26 00:00:09.330: INFO: Container liveness-probe ready: false, restart count 1 Nov 26 00:00:09.330: INFO: Container node-driver-registrar ready: false, restart count 1 Nov 26 00:00:09.330: INFO: csi-mockplugin-0 started at 2022-11-25 23:59:02 +0000 UTC (0+3 container statuses recorded) Nov 26 00:00:09.330: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 00:00:09.330: INFO: Container driver-registrar ready: true, restart count 2 Nov 26 00:00:09.330: INFO: Container mock ready: true, restart count 2 Nov 26 00:00:09.330: INFO: ss-2 started at 
2022-11-25 23:59:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container webserver ready: false, restart count 0 Nov 26 00:00:09.330: INFO: hostexec-bootstrap-e2e-minion-group-4434-bsjp2 started at 2022-11-25 23:58:35 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:00:09.330: INFO: local-volume-statefulset-2 started at 2022-11-25 23:59:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container nginx ready: true, restart count 0 Nov 26 00:00:09.330: INFO: kube-proxy-bootstrap-e2e-minion-group-4434 started at 2022-11-25 23:56:41 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container kube-proxy ready: true, restart count 3 Nov 26 00:00:09.330: INFO: konnectivity-agent-9h6nk started at 2022-11-25 23:56:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container konnectivity-agent ready: false, restart count 2 Nov 26 00:00:09.330: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:29 +0000 UTC (0+7 container statuses recorded) Nov 26 00:00:09.330: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container hostpath ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 00:00:09.330: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 00:00:09.330: INFO: netserver-0 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:09.330: INFO: Container webserver ready: true, restart count 2 Nov 26 00:00:10.176: INFO: Latency metrics for node bootstrap-e2e-minion-group-4434 Nov 26 00:00:10.176: INFO: Logging node info for node bootstrap-e2e-minion-group-51gr Nov 26 00:00:10.235: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-51gr 739f23c9-858a-495c-bf21-9f7320b53ec4 3401 0 2022-11-25 23:56:31 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-51gr kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-51gr topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-volumeio-1998":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumemode-9682":"bootstrap-e2e-minion-group-51gr","csi-mock-csi-mock-volumes-4710":"bootstrap-e2e-minion-group-51gr"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:31 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {node-problem-detector Update v1 2022-11-25 23:56:36 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-25 23:59:07 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 00:00:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-51gr,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} 
{<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-25 23:56:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-25 23:56:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-25 23:56:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-25 23:56:36 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:39 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:00:06 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:00:06 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:00:06 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:00:06 +0000 UTC,LastTransitionTime:2022-11-25 23:56:33 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.82.95.192,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:c1fa84483022b650834cff54e6b41aff,SystemUUID:c1fa8448-3022-b650-834c-ff54e6b41aff,BootID:3164b9a2-246e-435e-be83-42c92c567f8b,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4710^247124b3-6d1d-11ed-8d20-26297d20e60e],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 00:00:10.235: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-51gr Nov 26 00:00:10.289: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-51gr Nov 26 00:00:10.632: INFO: kube-dns-autoscaler-5f6455f985-7kdrd started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container autoscaler ready: true, restart count 2 Nov 26 00:00:10.632: INFO: pod-subpath-test-preprovisionedpv-4fjl started at 2022-11-25 23:58:56 +0000 UTC (1+2 container statuses recorded) Nov 26 00:00:10.632: INFO: Init container init-volume-preprovisionedpv-4fjl ready: true, restart count 1 Nov 26 00:00:10.632: INFO: Container test-container-subpath-preprovisionedpv-4fjl ready: true, restart count 4 Nov 26 00:00:10.632: INFO: Container test-container-volume-preprovisionedpv-4fjl ready: false, restart count 2 Nov 26 00:00:10.632: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:15 +0000 UTC (0+7 container statuses recorded) Nov 26 00:00:10.632: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 00:00:10.632: INFO: 
Container csi-provisioner ready: true, restart count 4 Nov 26 00:00:10.632: INFO: Container csi-resizer ready: true, restart count 4 Nov 26 00:00:10.632: INFO: Container csi-snapshotter ready: true, restart count 4 Nov 26 00:00:10.632: INFO: Container hostpath ready: true, restart count 4 Nov 26 00:00:10.632: INFO: Container liveness-probe ready: true, restart count 4 Nov 26 00:00:10.632: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 00:00:10.632: INFO: csi-mockplugin-attacher-0 started at 2022-11-25 23:58:36 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:00:10.632: INFO: pvc-volume-tester-nx9sb started at 2022-11-25 23:59:06 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:00:10.632: INFO: l7-default-backend-8549d69d99-97xrr started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 00:00:10.632: INFO: netserver-1 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container webserver ready: false, restart count 3 Nov 26 00:00:10.632: INFO: ss-1 started at 2022-11-25 23:59:14 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container webserver ready: true, restart count 1 Nov 26 00:00:10.632: INFO: konnectivity-agent-sg59x started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container konnectivity-agent ready: true, restart count 2 Nov 26 00:00:10.632: INFO: coredns-6d97d5ddb-7cmct started at 2022-11-25 23:56:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container coredns ready: false, restart count 2 Nov 26 00:00:10.632: INFO: csi-mockplugin-0 started at 2022-11-25 23:58:36 +0000 UTC (0+3 container statuses recorded) Nov 26 00:00:10.632: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:00:10.632: INFO: Container driver-registrar ready: true, restart count 1 Nov 26 00:00:10.632: INFO: Container mock ready: true, restart count 1 Nov 26 00:00:10.632: INFO: kube-proxy-bootstrap-e2e-minion-group-51gr started at 2022-11-25 23:56:31 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container kube-proxy ready: true, restart count 3 Nov 26 00:00:10.632: INFO: coredns-6d97d5ddb-6vx5m started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container coredns ready: false, restart count 3 Nov 26 00:00:10.632: INFO: hostexec-bootstrap-e2e-minion-group-51gr-wrlsk started at 2022-11-25 23:59:06 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:00:10.632: INFO: local-volume-statefulset-1 started at 2022-11-25 23:59:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container nginx ready: true, restart count 1 Nov 26 00:00:10.632: INFO: affinity-lb-d6f8b started at 2022-11-25 23:59:55 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container affinity-lb ready: true, restart count 0 Nov 26 00:00:10.632: INFO: hostexec-bootstrap-e2e-minion-group-51gr-c27wr started at 2022-11-25 23:58:31 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:00:10.632: INFO: 
csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:00:10.632: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 00:00:10.632: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 00:00:10.632: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 00:00:10.632: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 00:00:10.632: INFO: Container hostpath ready: true, restart count 0 Nov 26 00:00:10.632: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 00:00:10.632: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 00:00:10.632: INFO: metadata-proxy-v0.1-9xnlr started at 2022-11-25 23:56:32 +0000 UTC (0+2 container statuses recorded) Nov 26 00:00:10.632: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:00:10.632: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:00:10.632: INFO: volume-snapshot-controller-0 started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:10.632: INFO: Container volume-snapshot-controller ready: true, restart count 2 Nov 26 00:00:11.011: INFO: Latency metrics for node bootstrap-e2e-minion-group-51gr Nov 26 00:00:11.011: INFO: Logging node info for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:00:11.062: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-pjt7 5f55dd6b-a4d8-42f3-9e85-83e83c8dc9de 3391 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-pjt7 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-pjt7 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-7328":"bootstrap-e2e-minion-group-pjt7"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {node-problem-detector Update v1 2022-11-25 23:56:39 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-25 23:58:51 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-26 00:00:08 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-pjt7,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not 
read-only,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:00:08 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:00:08 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:00:08 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:00:08 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.105.124.11,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:f7ade85e43e2873500c8f33f09edf4a9,SystemUUID:f7ade85e-43e2-8735-00c8-f33f09edf4a9,BootID:07ab1c04-9bf6-4a67-bfa8-8d3160253b07,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9,DevicePath:,},},Config:nil,},} Nov 26 00:00:11.063: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:00:11.137: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-pjt7 Nov 26 00:00:11.230: INFO: metadata-proxy-v0.1-9jgjn started at 2022-11-25 23:56:35 +0000 UTC (0+2 container statuses recorded) Nov 26 00:00:11.230: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:00:11.230: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:00:11.230: INFO: metrics-server-v0.5.2-867b8754b9-c8h52 started at 2022-11-25 23:57:03 +0000 UTC (0+2 container statuses recorded) Nov 26 00:00:11.230: INFO: Container metrics-server ready: false, restart count 2 Nov 26 00:00:11.230: INFO: Container metrics-server-nanny ready: false, restart count 3 Nov 26 00:00:11.230: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-vsqtt started at 2022-11-25 23:59:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:00:11.230: INFO: pod-configmaps-607fb46f-a546-474e-99da-bccf05cace4e started at 2022-11-25 23:59:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:00:11.230: INFO: local-volume-statefulset-0 started at 2022-11-25 23:59:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container nginx ready: true, restart count 0 Nov 26 00:00:11.230: INFO: kube-proxy-bootstrap-e2e-minion-group-pjt7 started at 2022-11-25 23:56:34 +0000 UTC (0+1 container 
statuses recorded) Nov 26 00:00:11.230: INFO: Container kube-proxy ready: true, restart count 3 Nov 26 00:00:11.230: INFO: ss-0 started at 2022-11-25 23:59:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container webserver ready: false, restart count 1 Nov 26 00:00:11.230: INFO: external-provisioner-97ddc started at 2022-11-25 23:59:32 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container nfs-provisioner ready: true, restart count 2 Nov 26 00:00:11.230: INFO: affinity-lb-vbnzr started at 2022-11-25 23:59:55 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container affinity-lb ready: false, restart count 1 Nov 26 00:00:11.230: INFO: ss-0 started at 2022-11-25 23:58:53 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container webserver ready: false, restart count 2 Nov 26 00:00:11.230: INFO: netserver-2 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container webserver ready: false, restart count 3 Nov 26 00:00:11.230: INFO: pod-configmaps-283e1a65-2a1e-4f8e-9383-eeee204154b1 started at 2022-11-25 23:58:30 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:00:11.230: INFO: pod-secrets-890a9a5b-57be-471c-8757-4aad820ed6d0 started at 2022-11-25 23:58:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:00:11.230: INFO: hostpath-injector started at 2022-11-25 23:58:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container hostpath-injector ready: false, restart count 0 Nov 26 00:00:11.230: INFO: konnectivity-agent-ft6wq started at 2022-11-25 23:56:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container konnectivity-agent ready: false, restart count 2 Nov 26 00:00:11.230: INFO: pod-configmaps-39454904-1ea1-4326-806f-d840f1ec6aab started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.230: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:00:11.231: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:00:11.231: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 00:00:11.231: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 00:00:11.231: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 00:00:11.231: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 00:00:11.231: INFO: Container hostpath ready: true, restart count 1 Nov 26 00:00:11.231: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 00:00:11.231: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 00:00:11.231: INFO: nfs-server started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:00:11.231: INFO: Container nfs-server ready: true, restart count 2 Nov 26 00:00:11.722: INFO: Latency metrics for node bootstrap-e2e-minion-group-pjt7 [DeferCleanup (Each)] [sig-network] LoadBalancers tear down framework | framework.go:193 STEP: Destroying namespace "loadbalancers-4313" for this suite. 11/26/22 00:00:11.722
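The second LoadBalancers failure, reproduced below, shows the recurring symptom of this run in full: the framework's BeforeEach retries namespace creation against an API server that refuses connections, then gives up with wait for service account "default" in namespace "loadbalancers-334": timed out waiting for the condition. That closing phrase is the generic timeout returned by the polling helpers in k8s.io/apimachinery. As a rough sketch only (the 2s/2m interval and timeout, the helper name, and the abort-on-unexpected-error choice are assumptions, not the framework's actual implementation), the wait amounts to a client-go poll like this:

package main

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForDefaultServiceAccount polls until the "default" ServiceAccount exists
// in ns. On timeout, wait.PollImmediate returns the exact error text seen in
// the log: "timed out waiting for the condition".
func waitForDefaultServiceAccount(cs kubernetes.Interface, ns string) error {
	return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		_, err := cs.CoreV1().ServiceAccounts(ns).Get(context.TODO(), "default", metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // not created yet; keep polling
		}
		return err == nil, err // done on success; abort on unexpected errors
	})
}

func main() {
	// Kubeconfig path taken from the log above.
	config, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(config)
	if err := waitForDefaultServiceAccount(cs, "loadbalancers-334"); err != nil {
		fmt.Printf("wait for service account %q in namespace %q: %v\n", "default", "loadbalancers-334", err)
	}
}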
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\shave\ssession\saffinity\swork\sfor\sLoadBalancer\sservice\swith\sESIPP\son\s\[Slow\]\s\[LinuxOnly\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0009a84b0) test/e2e/framework/framework.go:241 +0x96f There were additional failures detected after the initial failure: [PANICKED] Test Panicked In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260 runtime error: invalid memory address or nil pointer dereference Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func19.2() test/e2e/network/loadbalancer.go:73 +0x113
from junit_01.xml
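Note the secondary failure recorded above: once BeforeEach has already failed (the namespace and client state were never fully set up), the suite's AfterEach at test/e2e/network/loadbalancer.go:73 dereferences uninitialized state and panics with a nil pointer dereference. The exact code at that line is not shown here; as a hedged illustration of the defensive pattern that avoids this class of panic (the names below are hypothetical), a Ginkgo v2 cleanup hook can simply bail out when setup never completed:

package network

import (
	"github.com/onsi/ginkgo/v2"
	"k8s.io/client-go/kubernetes"
)

var _ = ginkgo.Describe("LoadBalancers (sketch)", func() {
	var cs kubernetes.Interface // set by BeforeEach; stays nil if setup fails early

	ginkgo.AfterEach(func() {
		// Guard: if BeforeEach timed out before initializing the client,
		// there is nothing to clean up, so return instead of panicking on nil.
		if cs == nil {
			return
		}
		// ... cleanup that dereferences cs goes here ...
	})
})

This mirrors the ordering visible in the log: the [AfterEach] hooks still run after the BeforeEach timeout, so they must tolerate partially initialized state.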
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:15:32.679 Nov 26 00:15:32.679: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/26/22 00:15:32.681 Nov 26 00:15:32.721: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:34.760: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:36.761: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:38.761: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:40.760: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:42.761: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:44.761: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:46.760: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:48.760: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:50.760: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:52.761: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:54.760: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:56.762: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:18:02.061: INFO: Unexpected error: <*fmt.wrapError | 0xc004d4e000>: { msg: "wait for service account \"default\" in namespace \"loadbalancers-334\": timed out waiting for the condition", err: <*errors.errorString | 0xc0000d1da0>{ s: "timed out waiting for the condition", }, } Nov 26 00:18:02.061: FAIL: wait for service account "default" in namespace "loadbalancers-334": timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc0009a84b0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers test/e2e/framework/node/init/init.go:32 Nov 26 00:18:02.061: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:71 [DeferCleanup (Each)] [sig-network] LoadBalancers dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:18:02.148 STEP: Collecting events from 
namespace "loadbalancers-334". 11/26/22 00:18:02.148 STEP: Found 0 events. 11/26/22 00:18:02.19 Nov 26 00:18:02.234: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 00:18:02.234: INFO: Nov 26 00:18:02.283: INFO: Logging node info for node bootstrap-e2e-master Nov 26 00:18:02.325: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master fd5fd34c-05e8-4c7e-8cbe-bf91f0f95cea 9334 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 00:17:13 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 
DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:17:13 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.120.117,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:4d77311e15d4bb4a15d85de5a36cea94,SystemUUID:4d77311e-15d4-bb4a-15d8-5de5a36cea94,BootID:80daeaca-84b8-4927-98e9-a38242975836,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:124989749,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:57659704,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c 
registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 00:18:02.325: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 00:18:02.431: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 00:18:02.756: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:02.756: INFO: Container kube-addon-manager ready: true, restart count 1 Nov 26 00:18:02.756: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-25 23:56:07 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:02.756: INFO: Container l7-lb-controller ready: true, restart count 7 Nov 26 00:18:02.756: INFO: metadata-proxy-v0.1-thx76 started at 2022-11-25 23:56:34 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:02.756: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:18:02.756: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:18:02.756: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:02.756: INFO: Container kube-controller-manager ready: false, restart count 6 Nov 26 00:18:02.756: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:02.756: INFO: Container etcd-container ready: true, restart count 2 Nov 26 00:18:02.756: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:02.756: INFO: Container etcd-container ready: true, restart count 2 Nov 26 00:18:02.756: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:02.756: INFO: Container konnectivity-server-container ready: true, restart count 1 Nov 26 00:18:02.756: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:02.756: INFO: Container kube-scheduler ready: true, restart count 6 Nov 26 00:18:02.756: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-25 23:55:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:02.756: INFO: Container kube-apiserver ready: true, restart count 3 Nov 26 00:18:03.823: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 00:18:03.823: INFO: Logging node info for node bootstrap-e2e-minion-group-4434 Nov 26 00:18:03.868: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-4434 1aba3539-104b-4667-ab07-196915781437 9403 0 2022-11-25 23:56:41 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-4434 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 
topology.hostpath.csi/node:bootstrap-e2e-minion-group-4434 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1058":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2121":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-2486":"bootstrap-e2e-minion-group-4434","csi-hostpath-multivolume-9066":"bootstrap-e2e-minion-group-4434","csi-hostpath-provisioning-985":"bootstrap-e2e-minion-group-4434","csi-mock-csi-mock-volumes-2299":"bootstrap-e2e-minion-group-4434"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:08:33 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:16:47 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:17:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-4434,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:16:47 +0000 UTC,LastTransitionTime:2022-11-25 23:56:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:13:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:41 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.8.98,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-4434.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:e4112b9ebf318dd47967311e73935166,SystemUUID:e4112b9e-bf31-8dd4-7967-311e73935166,BootID:519ea9fb-1f7c-420e-8cea-cf36b5a7caca,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0cea4a8c-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-2486^0c573c17-6d1d-11ed-b16d-c6f47b03087e,DevicePath:,},},Config:nil,},} Nov 26 00:18:03.869: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-4434 Nov 26 00:18:03.993: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-4434 Nov 26 00:18:05.370: INFO: netserver-0 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container webserver ready: false, restart count 7 Nov 26 00:18:05.370: INFO: pod-93ad783f-bd8c-43cd-b936-dc278433c338 started at 2022-11-26 00:04:42 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:05.370: INFO: kube-proxy-bootstrap-e2e-minion-group-4434 started at 2022-11-25 23:56:41 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container kube-proxy ready: false, restart count 7 Nov 26 00:18:05.370: INFO: hostexec-bootstrap-e2e-minion-group-4434-4ctv8 started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:18:05.370: INFO: pvc-tester-hjwtq started at 2022-11-26 00:04:35 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:05.370: INFO: hostexec-bootstrap-e2e-minion-group-4434-x8nd2 started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container agnhost-container ready: 
true, restart count 1 Nov 26 00:18:05.370: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:30 +0000 UTC (0+4 container statuses recorded) Nov 26 00:18:05.370: INFO: Container busybox ready: false, restart count 5 Nov 26 00:18:05.370: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container driver-registrar ready: true, restart count 6 Nov 26 00:18:05.370: INFO: Container mock ready: true, restart count 6 Nov 26 00:18:05.370: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:59:16 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:05.370: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:18:05.370: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:14 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:05.370: INFO: Container csi-attacher ready: true, restart count 8 Nov 26 00:18:05.370: INFO: Container csi-provisioner ready: true, restart count 8 Nov 26 00:18:05.370: INFO: Container csi-resizer ready: true, restart count 8 Nov 26 00:18:05.370: INFO: Container csi-snapshotter ready: true, restart count 8 Nov 26 00:18:05.370: INFO: Container hostpath ready: true, restart count 8 Nov 26 00:18:05.370: INFO: Container liveness-probe ready: true, restart count 8 Nov 26 00:18:05.370: INFO: Container node-driver-registrar ready: true, restart count 8 Nov 26 00:18:05.370: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:29 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:05.370: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 00:18:05.370: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:18:05.370: INFO: konnectivity-agent-9h6nk started at 2022-11-25 23:56:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container konnectivity-agent ready: false, restart count 7 Nov 26 00:18:05.370: INFO: pod-570aaca2-5565-4c62-89d3-a199c7b4ebbb started at 2022-11-25 23:58:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:05.370: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:02:52 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:05.370: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container hostpath ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container liveness-probe ready: true, 
restart count 3 Nov 26 00:18:05.370: INFO: Container node-driver-registrar ready: true, restart count 3 Nov 26 00:18:05.370: INFO: pod-secrets-b416252b-41f0-47a8-a1f1-2904f5649ea7 started at 2022-11-26 00:09:00 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:18:05.370: INFO: metadata-proxy-v0.1-kdtvq started at 2022-11-25 23:56:42 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:05.370: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:18:05.370: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:18:05.370: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:29 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:05.370: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container hostpath ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container liveness-probe ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 00:18:05.370: INFO: netserver-0 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container webserver ready: true, restart count 4 Nov 26 00:18:05.370: INFO: test-hostpath-type-cstjx started at 2022-11-26 00:13:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:05.370: INFO: test-hostpath-type-966d7 started at 2022-11-26 00:13:19 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:05.370: INFO: hostexec-bootstrap-e2e-minion-group-4434-9kcrr started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container agnhost-container ready: false, restart count 6 Nov 26 00:18:05.370: INFO: csi-mockplugin-0 started at 2022-11-25 23:59:02 +0000 UTC (0+3 container statuses recorded) Nov 26 00:18:05.370: INFO: Container csi-provisioner ready: true, restart count 6 Nov 26 00:18:05.370: INFO: Container driver-registrar ready: true, restart count 6 Nov 26 00:18:05.370: INFO: Container mock ready: true, restart count 6 Nov 26 00:18:05.370: INFO: ss-1 started at 2022-11-26 00:01:13 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container webserver ready: true, restart count 7 Nov 26 00:18:05.370: INFO: pod-e24536f7-0c3d-44a2-ab47-cf68d9a28e12 started at 2022-11-26 00:04:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:05.370: INFO: nfs-server started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:05.370: INFO: Container nfs-server ready: true, restart count 2 Nov 26 00:18:05.370: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:05.370: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container csi-snapshotter ready: true, 
restart count 3 Nov 26 00:18:05.370: INFO: Container hostpath ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container liveness-probe ready: true, restart count 3 Nov 26 00:18:05.370: INFO: Container node-driver-registrar ready: true, restart count 3 Nov 26 00:18:06.719: INFO: Latency metrics for node bootstrap-e2e-minion-group-4434 Nov 26 00:18:06.719: INFO: Logging node info for node bootstrap-e2e-minion-group-51gr Nov 26 00:18:06.761: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-51gr 739f23c9-858a-495c-bf21-9f7320b53ec4 9450 0 2022-11-25 23:56:31 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-51gr kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-51gr topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-5859":"bootstrap-e2e-minion-group-51gr","csi-hostpath-multivolume-6045":"bootstrap-e2e-minion-group-51gr","csi-hostpath-multivolume-855":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumeio-1998":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumemode-9682":"bootstrap-e2e-minion-group-51gr","csi-hostpath-volumemode-9999":"bootstrap-e2e-minion-group-51gr","csi-mock-csi-mock-volumes-2541":"bootstrap-e2e-minion-group-51gr"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-25 23:56:31 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-25 23:56:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 00:13:55 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:16:38 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:18:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-51gr,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning 
properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:38 +0000 UTC,LastTransitionTime:2022-11-25 23:56:35 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:39 +0000 UTC,LastTransitionTime:2022-11-25 23:56:39 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:31 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:16:12 +0000 UTC,LastTransitionTime:2022-11-25 23:56:33 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.82.95.192,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-51gr.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:c1fa84483022b650834cff54e6b41aff,SystemUUID:c1fa8448-3022-b650-834c-ff54e6b41aff,BootID:3164b9a2-246e-435e-be83-42c92c567f8b,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a 
registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-multivolume-6045^2bdc44ff-6d1f-11ed-96fb-560e2640bdfc,DevicePath:,},},Config:nil,},} Nov 26 00:18:06.761: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-51gr Nov 26 00:18:06.807: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-51gr Nov 26 00:18:06.975: INFO: kube-dns-autoscaler-5f6455f985-7kdrd started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.975: INFO: Container autoscaler ready: false, restart count 7 Nov 26 00:18:06.975: INFO: netserver-1 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.975: INFO: Container webserver ready: true, restart count 5 Nov 26 00:18:06.975: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:13:15 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:06.975: INFO: Container csi-attacher ready: true, restart count 2 Nov 26 00:18:06.975: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 00:18:06.975: INFO: Container csi-resizer ready: true, restart count 2 Nov 26 00:18:06.975: INFO: Container csi-snapshotter ready: true, restart count 2 Nov 26 00:18:06.975: INFO: Container hostpath ready: true, restart count 2 Nov 26 00:18:06.975: INFO: Container liveness-probe ready: true, restart count 2 Nov 26 00:18:06.975: INFO: Container node-driver-registrar ready: true, restart count 2 Nov 26 00:18:06.975: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded) Nov 26 00:18:06.975: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:18:06.975: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 00:18:06.975: INFO: Container mock ready: false, restart count 5 Nov 26 00:18:06.975: INFO: csi-mockplugin-attacher-0 started at 2022-11-25 23:58:36 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.975: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:06.975: INFO: kube-proxy-bootstrap-e2e-minion-group-51gr started at 2022-11-25 23:56:31 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.975: INFO: Container kube-proxy ready: false, restart count 6 Nov 26 00:18:06.975: INFO: coredns-6d97d5ddb-6vx5m started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.975: INFO: Container coredns ready: false, restart count 7 Nov 26 00:18:06.975: INFO: konnectivity-agent-sg59x started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.975: INFO: Container konnectivity-agent ready: true, restart count 7 Nov 26 00:18:06.975: INFO: csi-mockplugin-0 started at 2022-11-25 23:58:36 +0000 UTC (0+3 
container statuses recorded) Nov 26 00:18:06.975: INFO: Container csi-provisioner ready: false, restart count 7 Nov 26 00:18:06.975: INFO: Container driver-registrar ready: false, restart count 7 Nov 26 00:18:06.975: INFO: Container mock ready: false, restart count 7 Nov 26 00:18:06.975: INFO: hostexec-bootstrap-e2e-minion-group-51gr-gncwt started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.975: INFO: Container agnhost-container ready: true, restart count 4 Nov 26 00:18:06.975: INFO: metadata-proxy-v0.1-9xnlr started at 2022-11-25 23:56:32 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:06.975: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:18:06.975: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:18:06.975: INFO: volume-snapshot-controller-0 started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.975: INFO: Container volume-snapshot-controller ready: false, restart count 5 Nov 26 00:18:06.975: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:29 +0000 UTC (0+4 container statuses recorded) Nov 26 00:18:06.975: INFO: Container busybox ready: true, restart count 6 Nov 26 00:18:06.975: INFO: Container csi-provisioner ready: true, restart count 5 Nov 26 00:18:06.975: INFO: Container driver-registrar ready: false, restart count 6 Nov 26 00:18:06.975: INFO: Container mock ready: false, restart count 6 Nov 26 00:18:06.975: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:00 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:06.975: INFO: Container csi-attacher ready: true, restart count 5 Nov 26 00:18:06.975: INFO: Container csi-provisioner ready: true, restart count 5 Nov 26 00:18:06.975: INFO: Container csi-resizer ready: true, restart count 5 Nov 26 00:18:06.975: INFO: Container csi-snapshotter ready: true, restart count 5 Nov 26 00:18:06.975: INFO: Container hostpath ready: true, restart count 5 Nov 26 00:18:06.975: INFO: Container liveness-probe ready: true, restart count 5 Nov 26 00:18:06.975: INFO: Container node-driver-registrar ready: true, restart count 5 Nov 26 00:18:06.975: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:13:42 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:06.975: INFO: Container csi-attacher ready: false, restart count 3 Nov 26 00:18:06.975: INFO: Container csi-provisioner ready: false, restart count 3 Nov 26 00:18:06.975: INFO: Container csi-resizer ready: false, restart count 3 Nov 26 00:18:06.975: INFO: Container csi-snapshotter ready: false, restart count 3 Nov 26 00:18:06.975: INFO: Container hostpath ready: false, restart count 3 Nov 26 00:18:06.975: INFO: Container liveness-probe ready: false, restart count 3 Nov 26 00:18:06.975: INFO: Container node-driver-registrar ready: false, restart count 3 Nov 26 00:18:06.975: INFO: pod-9dca799b-afc1-4920-8cdb-15687c00da67 started at 2022-11-26 00:13:50 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.975: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:06.976: INFO: ss-2 started at 2022-11-26 00:01:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.976: INFO: Container webserver ready: false, restart count 7 Nov 26 00:18:06.976: INFO: l7-default-backend-8549d69d99-97xrr started at 2022-11-25 23:56:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.976: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 00:18:06.976: INFO: netserver-1 started at 2022-11-25 23:58:12 +0000 UTC (0+1 
container statuses recorded) Nov 26 00:18:06.976: INFO: Container webserver ready: false, restart count 8 Nov 26 00:18:06.976: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:15 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:06.976: INFO: Container csi-attacher ready: true, restart count 8 Nov 26 00:18:06.976: INFO: Container csi-provisioner ready: true, restart count 8 Nov 26 00:18:06.976: INFO: Container csi-resizer ready: true, restart count 8 Nov 26 00:18:06.976: INFO: Container csi-snapshotter ready: true, restart count 8 Nov 26 00:18:06.976: INFO: Container hostpath ready: true, restart count 8 Nov 26 00:18:06.976: INFO: Container liveness-probe ready: true, restart count 8 Nov 26 00:18:06.976: INFO: Container node-driver-registrar ready: true, restart count 8 Nov 26 00:18:06.976: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.976: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 00:18:06.976: INFO: coredns-6d97d5ddb-7cmct started at 2022-11-25 23:56:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.976: INFO: Container coredns ready: false, restart count 8 Nov 26 00:18:06.976: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:00:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:06.976: INFO: Container csi-attacher ready: true, restart count 6 Nov 26 00:18:06.976: INFO: Container csi-provisioner ready: true, restart count 6 Nov 26 00:18:06.976: INFO: Container csi-resizer ready: true, restart count 6 Nov 26 00:18:06.976: INFO: Container csi-snapshotter ready: true, restart count 6 Nov 26 00:18:06.976: INFO: Container hostpath ready: true, restart count 6 Nov 26 00:18:06.976: INFO: Container liveness-probe ready: true, restart count 6 Nov 26 00:18:06.976: INFO: Container node-driver-registrar ready: true, restart count 6 Nov 26 00:18:06.976: INFO: hostexec-bootstrap-e2e-minion-group-51gr-6fnrz started at 2022-11-26 00:13:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.976: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:18:06.976: INFO: pod-subpath-test-preprovisionedpv-zqzf started at 2022-11-26 00:04:41 +0000 UTC (1+2 container statuses recorded) Nov 26 00:18:06.976: INFO: Init container init-volume-preprovisionedpv-zqzf ready: true, restart count 4 Nov 26 00:18:06.976: INFO: Container test-container-subpath-preprovisionedpv-zqzf ready: true, restart count 6 Nov 26 00:18:06.976: INFO: Container test-container-volume-preprovisionedpv-zqzf ready: true, restart count 6 Nov 26 00:18:06.976: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:37 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:06.976: INFO: Container csi-attacher ready: true, restart count 7 Nov 26 00:18:06.976: INFO: Container csi-provisioner ready: true, restart count 7 Nov 26 00:18:06.976: INFO: Container csi-resizer ready: true, restart count 7 Nov 26 00:18:06.976: INFO: Container csi-snapshotter ready: true, restart count 7 Nov 26 00:18:06.976: INFO: Container hostpath ready: true, restart count 7 Nov 26 00:18:06.976: INFO: Container liveness-probe ready: true, restart count 7 Nov 26 00:18:06.976: INFO: Container node-driver-registrar ready: true, restart count 7 Nov 26 00:18:06.976: INFO: pod-subpath-test-inlinevolume-c42v started at 2022-11-26 00:13:32 +0000 UTC (1+2 container statuses recorded) Nov 26 00:18:06.976: INFO: Init container init-volume-inlinevolume-c42v ready: true, restart count 1 Nov 26 00:18:06.976: INFO: Container 
test-container-subpath-inlinevolume-c42v ready: true, restart count 3 Nov 26 00:18:06.976: INFO: Container test-container-volume-inlinevolume-c42v ready: true, restart count 3 Nov 26 00:18:06.976: INFO: hostpath-symlink-prep-provisioning-1590 started at 2022-11-26 00:13:53 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:06.976: INFO: Container init-volume-provisioning-1590 ready: false, restart count 0 Nov 26 00:18:08.655: INFO: Latency metrics for node bootstrap-e2e-minion-group-51gr Nov 26 00:18:08.655: INFO: Logging node info for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:18:08.698: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-pjt7 5f55dd6b-a4d8-42f3-9e85-83e83c8dc9de 9360 0 2022-11-25 23:56:34 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-pjt7 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-pjt7 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-6349":"bootstrap-e2e-minion-group-pjt7","csi-mock-csi-mock-volumes-8391":"bootstrap-e2e-minion-group-pjt7"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}}}} } {kubelet Update v1 2022-11-25 23:56:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 00:13:49 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 00:16:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 00:17:24 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jenkins-gci-kubemark/us-west1-b/bootstrap-e2e-minion-group-pjt7,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no 
deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 00:16:42 +0000 UTC,LastTransitionTime:2022-11-25 23:56:38 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-25 23:56:50 +0000 UTC,LastTransitionTime:2022-11-25 23:56:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:49 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:49 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 00:13:49 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 00:13:49 +0000 UTC,LastTransitionTime:2022-11-25 23:56:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.105.124.11,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-pjt7.c.k8s-jenkins-gci-kubemark.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:f7ade85e43e2873500c8f33f09edf4a9,SystemUUID:f7ade85e-43e2-8735-00c8-f33f09edf4a9,BootID:07ab1c04-9bf6-4a67-bfa8-8d3160253b07,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.48+6bdda2da160043,KubeProxyVersion:v1.27.0-alpha.0.48+6bdda2da160043,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.48_6bdda2da160043],SizeBytes:67201224,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 
registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9 kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-7328^17edf36d-6d1d-11ed-b5f0-aa417b48ccd9,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-8391^ef87f140-6d1d-11ed-bcc3-7e9139d791d6,DevicePath:,},},Config:nil,},} Nov 26 00:18:08.699: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-pjt7 Nov 26 00:18:08.756: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-pjt7 Nov 26 00:18:09.122: INFO: nfs-server started at 2022-11-25 23:58:11 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container nfs-server ready: true, restart count 5 Nov 26 00:18:09.122: INFO: pod-21071e9a-af84-46e8-af96-a6a6561cb020 started at 2022-11-26 00:13:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:09.122: INFO: test-hostpath-type-lx6tk started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:09.122: INFO: test-hostpath-type-245dt started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:09.122: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 00:04:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container csi-attacher ready: false, restart count 4 Nov 26 00:18:09.122: INFO: csi-mockplugin-0 started at 2022-11-26 00:08:03 +0000 UTC (0+4 container statuses recorded) Nov 26 00:18:09.122: INFO: Container busybox ready: false, restart count 3 Nov 26 00:18:09.122: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 00:18:09.122: INFO: Container driver-registrar ready: true, restart count 5 Nov 26 00:18:09.122: INFO: Container mock ready: true, restart count 5 Nov 26 00:18:09.122: INFO: kube-proxy-bootstrap-e2e-minion-group-pjt7 started at 2022-11-25 23:56:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container kube-proxy ready: false, restart count 7 Nov 26 00:18:09.122: INFO: metadata-proxy-v0.1-9jgjn started at 2022-11-25 23:56:35 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:09.122: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 00:18:09.122: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 00:18:09.122: INFO: pod-961df2a5-8218-4399-a758-55d8b52b3564 started at 2022-11-26 00:13:33 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:09.122: INFO: pod-secrets-890a9a5b-57be-471c-8757-4aad820ed6d0 started at 2022-11-25 23:58:34 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 00:18:09.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-9qlmb started at 2022-11-26 00:04:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: true, 
restart count 1 Nov 26 00:18:09.122: INFO: test-hostpath-type-n5z6m started at 2022-11-26 00:04:48 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:09.122: INFO: pod-subpath-test-preprovisionedpv-jmbn started at 2022-11-26 00:13:46 +0000 UTC (1+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Init container init-volume-preprovisionedpv-jmbn ready: true, restart count 0 Nov 26 00:18:09.122: INFO: Container test-container-subpath-preprovisionedpv-jmbn ready: false, restart count 0 Nov 26 00:18:09.122: INFO: csi-hostpathplugin-0 started at 2022-11-26 00:09:48 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:09.122: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 00:18:09.122: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 00:18:09.122: INFO: Container csi-resizer ready: true, restart count 4 Nov 26 00:18:09.122: INFO: Container csi-snapshotter ready: true, restart count 4 Nov 26 00:18:09.122: INFO: Container hostpath ready: true, restart count 4 Nov 26 00:18:09.122: INFO: Container liveness-probe ready: true, restart count 4 Nov 26 00:18:09.122: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 00:18:09.122: INFO: netserver-2 started at 2022-11-26 00:04:45 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container webserver ready: true, restart count 3 Nov 26 00:18:09.122: INFO: test-hostpath-type-9nghx started at 2022-11-26 00:04:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:09.122: INFO: forbid-27823693-tbmqx started at 2022-11-26 00:13:14 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container c ready: true, restart count 1 Nov 26 00:18:09.122: INFO: metrics-server-v0.5.2-867b8754b9-c8h52 started at 2022-11-25 23:57:03 +0000 UTC (0+2 container statuses recorded) Nov 26 00:18:09.122: INFO: Container metrics-server ready: false, restart count 7 Nov 26 00:18:09.122: INFO: Container metrics-server-nanny ready: false, restart count 8 Nov 26 00:18:09.122: INFO: pod-back-off-image started at 2022-11-26 00:00:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container back-off ready: false, restart count 8 Nov 26 00:18:09.122: INFO: pod-configmaps-607fb46f-a546-474e-99da-bccf05cace4e started at 2022-11-25 23:59:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:18:09.122: INFO: inclusterclient started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container inclusterclient ready: false, restart count 0 Nov 26 00:18:09.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-8b49h started at 2022-11-26 00:13:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 00:18:09.122: INFO: test-hostpath-type-nxcg6 started at 2022-11-26 00:13:15 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-sh-testing ready: true, restart count 0 Nov 26 00:18:09.122: INFO: external-provisioner-v86lp started at 2022-11-26 00:13:16 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container nfs-provisioner ready: true, restart count 0 Nov 26 00:18:09.122: INFO: ss-0 started at 2022-11-25 23:59:51 +0000 UTC (0+1 
container statuses recorded) Nov 26 00:18:09.122: INFO: Container webserver ready: false, restart count 7 Nov 26 00:18:09.122: INFO: external-local-nodeport-dhpjs started at 2022-11-26 00:04:25 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container netexec ready: true, restart count 5 Nov 26 00:18:09.122: INFO: pod-cc7edce3-35cc-4f45-bad6-a784001395c6 started at 2022-11-26 00:00:17 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:09.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-wcqzb started at 2022-11-26 00:13:16 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:18:09.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-9r9pc started at 2022-11-26 00:13:18 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: true, restart count 4 Nov 26 00:18:09.122: INFO: external-provisioner-994ds started at 2022-11-26 00:13:49 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container nfs-provisioner ready: true, restart count 5 Nov 26 00:18:09.122: INFO: konnectivity-agent-ft6wq started at 2022-11-25 23:56:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container konnectivity-agent ready: true, restart count 7 Nov 26 00:18:09.122: INFO: netserver-2 started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container webserver ready: true, restart count 7 Nov 26 00:18:09.122: INFO: test-hostpath-type-mpdjn started at 2022-11-26 00:13:16 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-testing ready: true, restart count 0 Nov 26 00:18:09.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-rkscb started at 2022-11-26 00:13:17 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:18:09.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-wfjcd started at 2022-11-26 00:02:40 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 00:18:09.122: INFO: csi-mockplugin-0 started at 2022-11-26 00:04:28 +0000 UTC (0+3 container statuses recorded) Nov 26 00:18:09.122: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 00:18:09.122: INFO: Container driver-registrar ready: true, restart count 3 Nov 26 00:18:09.122: INFO: Container mock ready: true, restart count 3 Nov 26 00:18:09.122: INFO: pod-b48ba142-297b-4e60-b176-18111763e211 started at 2022-11-26 00:13:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container write-pod ready: false, restart count 0 Nov 26 00:18:09.122: INFO: back-off-cap started at 2022-11-26 00:13:18 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container back-off-cap ready: false, restart count 5 Nov 26 00:18:09.122: INFO: lb-internal-lvsqm started at 2022-11-26 00:13:32 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container netexec ready: true, restart count 0 Nov 26 00:18:09.122: INFO: pod-subpath-test-preprovisionedpv-92c8 started at 2022-11-26 00:02:55 +0000 UTC (1+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Init container init-volume-preprovisionedpv-92c8 ready: true, restart count 0 Nov 26 00:18:09.122: INFO: Container 
test-container-subpath-preprovisionedpv-92c8 ready: false, restart count 0 Nov 26 00:18:09.122: INFO: test-hostpath-type-x56nj started at 2022-11-26 00:04:26 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-sh-testing ready: false, restart count 0 Nov 26 00:18:09.122: INFO: var-expansion-5d5e62ea-c0e8-4fb3-be3d-1c786f246364 started at 2022-11-26 00:01:46 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container dapi-container ready: false, restart count 0 Nov 26 00:18:09.122: INFO: hostpath-injector started at 2022-11-25 23:58:51 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container hostpath-injector ready: false, restart count 0 Nov 26 00:18:09.122: INFO: hostexec-bootstrap-e2e-minion-group-pjt7-f89gp started at 2022-11-26 00:13:22 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 00:18:09.122: INFO: external-provisioner-p6q4d started at 2022-11-26 00:04:27 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container nfs-provisioner ready: true, restart count 6 Nov 26 00:18:09.122: INFO: test-hostpath-type-qw7ws started at 2022-11-26 00:13:28 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-sh-testing ready: true, restart count 0 Nov 26 00:18:09.122: INFO: pod-configmaps-39454904-1ea1-4326-806f-d840f1ec6aab started at 2022-11-25 23:58:12 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:18:09.122: INFO: csi-hostpathplugin-0 started at 2022-11-25 23:58:33 +0000 UTC (0+7 container statuses recorded) Nov 26 00:18:09.122: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 00:18:09.122: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 00:18:09.122: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 00:18:09.122: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 00:18:09.122: INFO: Container hostpath ready: false, restart count 6 Nov 26 00:18:09.122: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 00:18:09.122: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 00:18:09.122: INFO: test-hostpath-type-whtq5 started at 2022-11-26 00:04:24 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 00:18:09.122: INFO: pod-subpath-test-inlinevolume-xjdn started at 2022-11-26 00:04:26 +0000 UTC (1+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Init container init-volume-inlinevolume-xjdn ready: true, restart count 0 Nov 26 00:18:09.122: INFO: Container test-container-subpath-inlinevolume-xjdn ready: false, restart count 0 Nov 26 00:18:09.122: INFO: pvc-volume-tester-wmxdq started at 2022-11-26 00:04:47 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container volume-tester ready: false, restart count 0 Nov 26 00:18:09.122: INFO: pod-configmaps-283e1a65-2a1e-4f8e-9383-eeee204154b1 started at 2022-11-25 23:58:30 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 00:18:09.122: INFO: test-hostpath-type-gjrf5 started at 2022-11-26 00:09:39 +0000 UTC (0+1 container statuses recorded) Nov 26 00:18:09.122: INFO: Container host-path-testing ready: false, restart count 0 Nov 26 
00:18:10.087: INFO: Latency metrics for node bootstrap-e2e-minion-group-pjt7 [DeferCleanup (Each)] [sig-network] LoadBalancers tear down framework | framework.go:193 STEP: Destroying namespace "loadbalancers-334" for this suite. 11/26/22 00:18:10.087
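The per-node dumps above ("Logging pods the kubelet thinks is on node ...") are produced by the framework's namespace-dump helper, which lists every pod scheduled to a node and prints each container's readiness and restart count. A minimal client-go sketch of the same query follows (the kubeconfig path is an assumption taken from the log header, not from this run's code):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed kubeconfig path; the run above used /workspace/.kube/config.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Ask the apiserver for all pods bound to one node, across all namespaces.
	pods, err := client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(),
		metav1.ListOptions{FieldSelector: "spec.nodeName=bootstrap-e2e-minion-group-pjt7"})
	if err != nil {
		panic(err)
	}
	// Print the same readiness/restart-count summary the dump shows.
	for _, p := range pods.Items {
		for _, cs := range p.Status.ContainerStatuses {
			fmt.Printf("%s/%s container %s ready: %v, restart count %d\n",
				p.Namespace, p.Name, cs.Name, cs.Ready, cs.RestartCount)
		}
	}
}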
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sshould\sonly\sallow\saccess\sfrom\sservice\sloadbalancer\ssource\sranges\s\[Slow\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000d052c0) test/e2e/framework/framework.go:241 +0x96f There were additional failures detected after the initial failure: [PANICKED] Test Panicked In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260 runtime error: invalid memory address or nil pointer dereference Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func19.2() test/e2e/network/loadbalancer.go:73 +0x113
from junit_01.xml
[BeforeEach] [sig-network] LoadBalancers set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:02:57.463 Nov 26 00:02:57.464: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename loadbalancers 11/26/22 00:02:57.466 Nov 26 00:02:57.505: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:02:59.545: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:01.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:03.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:05.545: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:07.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:09.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:11.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:13.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:15.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:17.545: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:19.545: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:21.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:23.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:25.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:27.546: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:27.585: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:03:27.585: INFO: Unexpected error: <*errors.errorString | 0xc000215c80>: { s: "timed out waiting for the condition", } Nov 26 00:03:27.585: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000d052c0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] LoadBalancers test/e2e/framework/node/init/init.go:32 Nov 26 00:03:27.585: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers test/e2e/network/loadbalancer.go:71 [DeferCleanup (Each)] [sig-network] LoadBalancers dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:03:27.625 [DeferCleanup (Each)] [sig-network] LoadBalancers tear down framework | framework.go:193
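The failure above is the framework's BeforeEach giving up: it retries namespace creation roughly every two seconds, and when every attempt ends in "connection refused" the poll expires with "timed out waiting for the condition". A rough equivalent of that retry loop in plain client-go (a sketch, not the framework's code; the 30s timeout and namespace prefix are assumed values):

package main

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func createTestNamespace(client kubernetes.Interface) error {
	// Poll until the create succeeds or the overall timeout expires.
	// If no attempt succeeds, PollImmediate returns wait.ErrWaitTimeout,
	// whose message is exactly "timed out waiting for the condition".
	return wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		_, err := client.CoreV1().Namespaces().Create(context.TODO(),
			&v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "loadbalancers-"}},
			metav1.CreateOptions{})
		if err != nil {
			// "connection refused" is treated as transient: log and retry.
			fmt.Printf("Unexpected error while creating namespace: %v\n", err)
			return false, nil
		}
		return true, nil
	})
}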
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sNetworking\sGranular\sChecks\:\sServices\sshould\supdate\snodePort\:\shttp\s\[Slow\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000ca25a0) test/e2e/framework/framework.go:241 +0x96f
from junit_01.xml
[BeforeEach] [sig-network] Networking set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:15:02.506 Nov 26 00:15:02.506: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename nettest 11/26/22 00:15:02.508 Nov 26 00:15:02.547: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:04.588: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:06.587: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:08.587: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:10.588: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:12.587: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:14.588: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:16.587: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:18.587: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:20.587: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:22.588: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:24.587: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:26.588: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:28.587: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:30.587: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:32.588: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:32.628: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:15:32.628: INFO: Unexpected error: <*errors.errorString | 0xc000207ce0>: { s: "timed out waiting for the condition", } Nov 26 00:15:32.628: FAIL: timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000ca25a0) 
test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 Nov 26 00:15:32.628: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:15:32.668 [DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193
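Every attempt above dies with dial tcp 34.168.120.117:443: connect: connection refused, meaning nothing was listening at the apiserver endpoint; the requests were never rejected by Kubernetes itself. One way to confirm that from the same client is to probe /healthz directly (a sketch; assumes a clientset built as in the previous snippet):

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"
)

func apiserverHealthy(client kubernetes.Interface) error {
	// Hit the apiserver's /healthz endpoint through the clientset's REST client.
	// A transport-level "connection refused" here implicates the control plane,
	// not any individual e2e request.
	body, err := client.Discovery().RESTClient().Get().AbsPath("/healthz").DoRaw(context.TODO())
	if err != nil {
		return fmt.Errorf("apiserver unreachable: %w", err)
	}
	fmt.Printf("healthz: %s\n", body)
	return nil
}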
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sNetworking\sGranular\sChecks\:\sServices\sshould\supdate\snodePort\:\sudp\s\[Slow\]$'
test/e2e/framework/network/utils.go:866 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc0011281c0, {0x75c6f7c, 0x9}, 0xc00451b650) test/e2e/framework/network/utils.go:866 +0x1d0 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc0011281c0, 0x7fb8345b0248?) test/e2e/framework/network/utils.go:763 +0x55 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc0011281c0, 0x3e?) test/e2e/framework/network/utils.go:778 +0x3e k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000d185a0, {0xc003dc6f20, 0x1, 0x0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func22.6.15() test/e2e/network/networking.go:395 +0x51 There were additional failures detected after the initial failure: [FAILED] Nov 26 00:02:57.436: failed to list events in namespace "nettest-4085": Get "https://34.168.120.117/api/v1/namespaces/nettest-4085/events": dial tcp 34.168.120.117:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 00:02:57.476: Couldn't delete ns: "nettest-4085": Delete "https://34.168.120.117/api/v1/namespaces/nettest-4085": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/nettest-4085", Err:(*net.OpError)(0xc0047626e0)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370
from junit_01.xml
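The setup log that follows is dominated by the framework's "running and ready" wait: every couple of seconds it re-reads the pod and checks the phase plus the PodReady condition, for up to the 5m0s stated in the log. A rough client-go equivalent (a sketch, not the framework's actual implementation):

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitRunningAndReady(client kubernetes.Interface, ns, name string) error {
	return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		pod, err := client.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Mirrors the log: first wait for Phase=Running, then for Ready=true.
		if pod.Status.Phase != v1.PodRunning {
			return false, nil
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodReady {
				return cond.Status == v1.ConditionTrue, nil
			}
		}
		return false, nil
	})
}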
[BeforeEach] [sig-network] Networking set up framework | framework.go:178 STEP: Creating a kubernetes client 11/25/22 23:58:11.751 Nov 25 23:58:11.751: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename nettest 11/25/22 23:58:11.754 STEP: Waiting for a default service account to be provisioned in namespace 11/25/22 23:58:12.23 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/25/22 23:58:12.325 [BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 [It] should update nodePort: udp [Slow] test/e2e/network/networking.go:394 STEP: Performing setup for networking test in namespace nettest-4085 11/25/22 23:58:12.529 STEP: creating a selector 11/25/22 23:58:12.529 STEP: Creating the service pods in kubernetes 11/25/22 23:58:12.529 Nov 25 23:58:12.529: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable Nov 25 23:58:12.960: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "nettest-4085" to be "running and ready" Nov 25 23:58:13.008: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 48.87116ms Nov 25 23:58:13.008: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 25 23:58:15.067: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 2.107805012s Nov 25 23:58:15.067: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 25 23:58:17.052: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 4.092809569s Nov 25 23:58:17.052: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 25 23:58:19.063: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 6.103383624s Nov 25 23:58:19.063: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 25 23:58:21.053: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 8.093823313s Nov 25 23:58:21.053: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 25 23:58:23.061: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.101566393s Nov 25 23:58:23.061: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:25.113: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.15387513s Nov 25 23:58:25.113: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:27.054: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.093974178s Nov 25 23:58:27.054: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:29.069: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.109724691s Nov 25 23:58:29.069: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:31.053: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.09339244s Nov 25 23:58:31.053: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:33.052: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.092201227s Nov 25 23:58:33.052: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:35.052: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 22.092262021s Nov 25 23:58:35.052: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:37.054: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 24.094524789s Nov 25 23:58:37.054: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:39.051: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 26.091269785s Nov 25 23:58:39.051: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:41.061: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 28.101178979s Nov 25 23:58:41.061: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:43.051: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 30.091127699s Nov 25 23:58:43.051: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:45.050: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 32.090554445s Nov 25 23:58:45.050: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:47.051: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 34.091567174s Nov 25 23:58:47.051: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:49.051: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 36.09151302s Nov 25 23:58:49.051: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:51.052: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 38.092075508s Nov 25 23:58:51.052: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 25 23:58:53.084: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 40.1246847s Nov 25 23:58:53.084: INFO: The phase of Pod netserver-0 is Running (Ready = true) Nov 25 23:58:53.084: INFO: Pod "netserver-0" satisfied condition "running and ready" Nov 25 23:58:53.143: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "nettest-4085" to be "running and ready" Nov 25 23:58:53.186: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 43.494848ms Nov 25 23:58:53.186: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:58:55.231: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2.08795814s Nov 25 23:58:55.231: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:58:57.230: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 4.087298939s Nov 25 23:58:57.230: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:58:59.232: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 6.089414862s Nov 25 23:58:59.232: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:01.232: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 8.089653105s Nov 25 23:59:01.232: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:03.232: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 10.089435036s Nov 25 23:59:03.232: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:05.238: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 12.095640587s Nov 25 23:59:05.238: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:07.233: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. 
Elapsed: 14.089964908s Nov 25 23:59:07.233: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:09.345: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 16.202564333s Nov 25 23:59:09.345: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:11.232: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 18.088968297s Nov 25 23:59:11.232: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:13.243: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 20.100597096s Nov 25 23:59:13.243: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:15.239: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 22.096068317s Nov 25 23:59:15.239: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:17.230: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 24.087671226s Nov 25 23:59:17.230: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:19.344: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 26.2018442s Nov 25 23:59:19.344: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:21.231: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 28.088516548s Nov 25 23:59:21.231: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:23.249: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 30.106761602s Nov 25 23:59:23.249: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:25.314: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 32.171042199s Nov 25 23:59:25.314: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:27.254: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 34.111620886s Nov 25 23:59:27.254: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:29.259: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 36.116500136s Nov 25 23:59:29.259: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:31.245: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 38.102862458s Nov 25 23:59:31.245: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:33.252: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 40.109276997s Nov 25 23:59:33.252: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:35.275: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 42.131960076s Nov 25 23:59:35.275: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:37.244: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 44.101683552s Nov 25 23:59:37.244: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:39.297: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 46.154525237s Nov 25 23:59:39.297: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:41.266: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 48.123383133s Nov 25 23:59:41.266: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:43.289: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. 
Elapsed: 50.146145319s Nov 25 23:59:43.289: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:45.288: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 52.145292096s Nov 25 23:59:45.288: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:47.243: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 54.100202217s Nov 25 23:59:47.243: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:49.240: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 56.097302812s Nov 25 23:59:49.240: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:51.257: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 58.114636676s Nov 25 23:59:51.257: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:53.236: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m0.093795388s Nov 25 23:59:53.236: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:55.255: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m2.112185794s Nov 25 23:59:55.255: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:57.235: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m4.092816192s Nov 25 23:59:57.235: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 25 23:59:59.241: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m6.098264696s Nov 25 23:59:59.241: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:01.252: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m8.109275905s Nov 26 00:00:01.252: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:03.243: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m10.100141743s Nov 26 00:00:03.243: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:05.280: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m12.137178588s Nov 26 00:00:05.280: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:07.256: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m14.113845764s Nov 26 00:00:07.256: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:09.333: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m16.189933595s Nov 26 00:00:09.333: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:11.238: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m18.095673321s Nov 26 00:00:11.238: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:13.237: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m20.094402908s Nov 26 00:00:13.237: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:15.256: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m22.113091198s Nov 26 00:00:15.256: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:17.230: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. 
Elapsed: 1m24.087815006s Nov 26 00:00:17.230: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:19.235: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m26.092226899s Nov 26 00:00:19.235: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:21.234: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m28.091508369s Nov 26 00:00:21.234: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:23.233: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m30.090291814s Nov 26 00:00:23.233: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 00:00:25.229: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 1m32.086852493s Nov 26 00:00:25.229: INFO: The phase of Pod netserver-1 is Running (Ready = true) Nov 26 00:00:25.229: INFO: Pod "netserver-1" satisfied condition "running and ready" Nov 26 00:00:25.273: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "nettest-4085" to be "running and ready" Nov 26 00:00:25.316: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 42.554723ms Nov 26 00:00:25.316: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:27.374: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 2.100511831s Nov 26 00:00:27.374: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:29.359: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 4.085851513s Nov 26 00:00:29.359: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:31.358: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 6.085122216s Nov 26 00:00:31.358: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:33.367: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 8.094179807s Nov 26 00:00:33.367: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:35.359: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 10.085797699s Nov 26 00:00:35.359: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:37.385: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 12.112064089s Nov 26 00:00:37.385: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:39.444: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 14.170617564s Nov 26 00:00:39.444: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:41.358: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 16.085051033s Nov 26 00:00:41.358: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:43.388: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 18.114282308s Nov 26 00:00:43.388: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:45.412: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 20.138734449s Nov 26 00:00:45.412: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:47.396: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. 
Elapsed: 22.123102954s Nov 26 00:00:47.396: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:00:49.374: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 24.100469733s Nov 26 00:00:49.374: INFO: The phase of Pod netserver-2 is Running (Ready = false)
[... ~60 near-identical polls at ~2s intervals elided; netserver-2 stayed Running with Ready = false from 26s through 2m28s elapsed ...]
Nov 26 00:02:55.377: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. 
Elapsed: 2m30.1035703s Nov 26 00:02:55.377: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 00:02:57.356: INFO: Encountered non-retryable error while getting pod nettest-4085/netserver-2: Get "https://34.168.120.117/api/v1/namespaces/nettest-4085/pods/netserver-2": dial tcp 34.168.120.117:443: connect: connection refused Nov 26 00:02:57.357: INFO: Unexpected error: <*fmt.wrapError | 0xc0046214e0>: { msg: "error while waiting for pod nettest-4085/netserver-2 to be running and ready: Get \"https://34.168.120.117/api/v1/namespaces/nettest-4085/pods/netserver-2\": dial tcp 34.168.120.117:443: connect: connection refused", err: <*url.Error | 0xc003c794d0>{ Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/nettest-4085/pods/netserver-2", Err: <*net.OpError | 0xc004762500>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0034625a0>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc0046214a0>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, }, } Nov 26 00:02:57.357: FAIL: error while waiting for pod nettest-4085/netserver-2 to be running and ready: Get "https://34.168.120.117/api/v1/namespaces/nettest-4085/pods/netserver-2": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc0011281c0, {0x75c6f7c, 0x9}, 0xc00451b650) test/e2e/framework/network/utils.go:866 +0x1d0 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc0011281c0, 0x7fb8345b0248?) test/e2e/framework/network/utils.go:763 +0x55 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc0011281c0, 0x3e?) test/e2e/framework/network/utils.go:778 +0x3e k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000d185a0, {0xc003dc6f20, 0x1, 0x0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func22.6.15() test/e2e/network/networking.go:395 +0x51 [AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 Nov 26 00:02:57.357: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] Networking test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:02:57.397 STEP: Collecting events from namespace "nettest-4085". 
11/26/22 00:02:57.397 Nov 26 00:02:57.436: INFO: Unexpected error: failed to list events in namespace "nettest-4085": <*url.Error | 0xc00451ab70>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/nettest-4085/events", Err: <*net.OpError | 0xc0046164b0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc003462b70>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc00347f220>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:02:57.436: FAIL: failed to list events in namespace "nettest-4085": Get "https://34.168.120.117/api/v1/namespaces/nettest-4085/events": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc0020d45c0, {0xc0036675a0, 0xc}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc00379ab60}, {0xc0036675a0, 0xc}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc0020d4650?, {0xc0036675a0?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc000d185a0) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc001378400?, 0xc004428fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc0036b83c8?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc001378400?, 0x29449fc?}, {0xae73300?, 0xc004428f80?, 0x0?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 STEP: Destroying namespace "nettest-4085" for this suite. 11/26/22 00:02:57.437 Nov 26 00:02:57.476: FAIL: Couldn't delete ns: "nettest-4085": Delete "https://34.168.120.117/api/v1/namespaces/nettest-4085": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/nettest-4085", Err:(*net.OpError)(0xc0047626e0)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc000d185a0) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc001378330?, 0xc000ca4c80?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc0009de690?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc001378330?, 0xc000013728?}, {0xae73300?, 0xc000013830?, 0xc000999e60?}) /usr/local/go/src/reflect/value.go:368 +0xbc
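The poll above is the standard e2e readiness wait: GET the pod every ~2s, log phase and readiness, and give up early on a transport-level error (the "Encountered non-retryable error" line). A minimal sketch of that pattern with client-go, assuming a plain clientset; this is illustrative, not the framework's actual helper:

package sketch

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodRunningReady polls a pod every 2s until it is Running with
// Ready=true, printing lines like the log above. Returning a non-nil
// error from the condition aborts the poll immediately, which is why a
// "connection refused" from the apiserver fails the spec on the spot.
func waitForPodRunningReady(cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	start := time.Now()
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("error while waiting for pod %s/%s to be running and ready: %w", ns, name, err)
		}
		ready := false
		for _, c := range pod.Status.Conditions {
			if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
				ready = true
			}
		}
		fmt.Printf("Pod %q: Phase=%q, readiness=%v. Elapsed: %s\n", name, pod.Status.Phase, ready, time.Since(start))
		return pod.Status.Phase == corev1.PodRunning && ready, nil
	})
}

Note that every failure in this run dials the same endpoint, 34.168.120.117:443, and gets "connection refused", which suggests the apiserver went down mid-run rather than anything being wrong with the specs themselves.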
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sServices\sGCE\s\[Slow\]\sshould\sbe\sable\sto\screate\sand\stear\sdown\sa\sstandard\-tier\sload\sbalancer\s\[Slow\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000bbe0f0) test/e2e/framework/framework.go:241 +0x96f There were additional failures detected after the initial failure: [PANICKED] Test Panicked In [AfterEach] at: /usr/local/go/src/runtime/panic.go:260 runtime error: invalid memory address or nil pointer dereference Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func21.2() test/e2e/network/network_tiers.go:57 +0x133
from junit_01.xml
[BeforeEach] [sig-network] Services GCE [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:13:58.807 Nov 26 00:13:58.807: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename services 11/26/22 00:13:58.808
Nov 26 00:13:58.848: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused
[... the same "connection refused" POST failure repeated every ~2s through 00:14:28.927 ...]
Nov 26 00:14:28.927: INFO: Unexpected error: <*errors.errorString | 0xc000205d30>: { s: "timed out waiting for the condition", } Nov 26 00:14:28.927: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000bbe0f0) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-network] Services GCE [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 00:14:28.928: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] Services GCE [Slow] test/e2e/network/network_tiers.go:55 [DeferCleanup (Each)] [sig-network] Services GCE [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:14:28.968 [DeferCleanup (Each)] [sig-network] Services GCE [Slow] tear down framework | framework.go:193
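The repeated "Unexpected error while creating namespace" lines come from a create-with-retry loop: the framework re-POSTs the namespace every ~2s until a deadline, and "timed out waiting for the condition" is the generic error the wait helper returns when the deadline passes. A hedged sketch of that pattern (the helper name and the 30s deadline are ours):

package sketch

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// createTestNamespace retries namespace creation until it succeeds or
// the deadline passes. Unlike the pod readiness poll, errors here are
// logged and retried: the apiserver may simply not be reachable yet.
func createTestNamespace(cs kubernetes.Interface, baseName string) (*corev1.Namespace, error) {
	var created *corev1.Namespace
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		ns, err := cs.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{GenerateName: baseName + "-"},
		}, metav1.CreateOptions{})
		if err != nil {
			fmt.Printf("Unexpected error while creating namespace: %v\n", err)
			return false, nil
		}
		created = ns
		return true, nil
	})
	return created, err
}

The [PANICKED] nil-pointer dereference reported for the [AfterEach] at network_tiers.go:57 is then most likely a knock-on failure: cleanup dereferences state that the failed BeforeEach never initialized.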
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-node\]\sPods\sshould\scap\sback\-off\sat\sMaxContainerBackOff\s\[Slow\]\[NodeConformance\]$'
test/e2e/common/node/pods.go:129 k8s.io/kubernetes/test/e2e/common/node.getRestartDelay(0xc0011e4780, {0x75d2035, 0xc}, {0x75d2035, 0xc}) test/e2e/common/node/pods.go:129 +0x225 k8s.io/kubernetes/test/e2e/common/node.glob..func15.10() test/e2e/common/node/pods.go:746 +0x37f There were additional failures detected after the initial failure: [FAILED] Nov 26 00:23:37.355: failed to list events in namespace "pods-2454": Get "https://34.168.120.117/api/v1/namespaces/pods-2454/events": dial tcp 34.168.120.117:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 00:23:37.395: Couldn't delete ns: "pods-2454": Delete "https://34.168.120.117/api/v1/namespaces/pods-2454": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/pods-2454", Err:(*net.OpError)(0xc0034dd950)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370
from junit_01.xml
[BeforeEach] [sig-node] Pods set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:11:18.617 Nov 26 00:11:18.617: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename pods 11/26/22 00:11:18.62 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 00:13:15.752 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 00:13:15.862 [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-node] Pods test/e2e/common/node/pods.go:194 [It] should cap back-off at MaxContainerBackOff [Slow][NodeConformance] test/e2e/common/node/pods.go:717 Nov 26 00:13:18.022: INFO: Waiting up to 5m0s for pod "back-off-cap" in namespace "pods-2454" to be "running and ready" Nov 26 00:13:18.063: INFO: Pod "back-off-cap": Phase="Pending", Reason="", readiness=false. Elapsed: 41.245816ms Nov 26 00:13:18.063: INFO: The phase of Pod back-off-cap is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:13:20.153: INFO: Pod "back-off-cap": Phase="Pending", Reason="", readiness=false. Elapsed: 2.131592063s Nov 26 00:13:20.153: INFO: The phase of Pod back-off-cap is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:13:22.128: INFO: Pod "back-off-cap": Phase="Pending", Reason="", readiness=false. Elapsed: 4.106544196s Nov 26 00:13:22.128: INFO: The phase of Pod back-off-cap is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:13:24.407: INFO: Pod "back-off-cap": Phase="Pending", Reason="", readiness=false. Elapsed: 6.385041099s Nov 26 00:13:24.407: INFO: The phase of Pod back-off-cap is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:13:26.135: INFO: Pod "back-off-cap": Phase="Pending", Reason="", readiness=false. Elapsed: 8.113519787s Nov 26 00:13:26.135: INFO: The phase of Pod back-off-cap is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:13:28.136: INFO: Pod "back-off-cap": Phase="Pending", Reason="", readiness=false. Elapsed: 10.113981989s Nov 26 00:13:28.136: INFO: The phase of Pod back-off-cap is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:13:30.140: INFO: Pod "back-off-cap": Phase="Pending", Reason="", readiness=false. Elapsed: 12.118120796s Nov 26 00:13:30.140: INFO: The phase of Pod back-off-cap is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:13:32.139: INFO: Pod "back-off-cap": Phase="Pending", Reason="", readiness=false. Elapsed: 14.117291719s Nov 26 00:13:32.139: INFO: The phase of Pod back-off-cap is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:13:34.200: INFO: Pod "back-off-cap": Phase="Pending", Reason="", readiness=false. Elapsed: 16.178577674s Nov 26 00:13:34.200: INFO: The phase of Pod back-off-cap is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:13:36.154: INFO: Pod "back-off-cap": Phase="Running", Reason="", readiness=true. 
Elapsed: 18.132555069s Nov 26 00:13:36.154: INFO: The phase of Pod back-off-cap is Running (Ready = true) Nov 26 00:13:36.154: INFO: Pod "back-off-cap" satisfied condition "running and ready"
------------------------------ Progress Report for Ginkgo Process #21 Automatically polling progress: [sig-node] Pods should cap back-off at MaxContainerBackOff [Slow][NodeConformance] (Spec Runtime: 6m59.351s) test/e2e/common/node/pods.go:717 In [It] (Node Runtime: 5m0s) test/e2e/common/node/pods.go:717 Spec Goroutine goroutine 1483 [sleep, 6 minutes] time.Sleep(0x8bb2c97000) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/common/node.glob..func15.10() test/e2e/common/node/pods.go:737 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00354e300}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------
[... 15 further progress reports elided: every ~20s Ginkgo re-dumped the same goroutine, still in the time.Sleep at pods.go:737, while Spec Runtime advanced from 7m19s to 11m59s ...]
STEP: getting restart delay when capped 11/26/22 00:23:36.234 Nov 26 00:23:37.274: INFO: Unexpected error: getting pod back-off-cap: <*url.Error | 0xc004586000>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/pods-2454/pods/back-off-cap", Err: <*net.OpError | 0xc000c8bf40>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0041bd890>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc00164e000>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:23:37.275: FAIL: getting pod back-off-cap: Get "https://34.168.120.117/api/v1/namespaces/pods-2454/pods/back-off-cap": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/common/node.getRestartDelay(0xc0011e4780, {0x75d2035, 0xc}, {0x75d2035, 0xc}) test/e2e/common/node/pods.go:129 +0x225 k8s.io/kubernetes/test/e2e/common/node.glob..func15.10() test/e2e/common/node/pods.go:746 +0x37f [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 Nov 26 00:23:37.275: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:23:37.315 STEP: Collecting events from namespace "pods-2454". 
11/26/22 00:23:37.315 Nov 26 00:23:37.355: INFO: Unexpected error: failed to list events in namespace "pods-2454": <*url.Error | 0xc004586720>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/pods-2454/events", Err: <*net.OpError | 0xc003a5a1e0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0046aa720>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc00164e540>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:23:37.355: FAIL: failed to list events in namespace "pods-2454": Get "https://34.168.120.117/api/v1/namespaces/pods-2454/events": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc0015605c0, {0xc00346e190, 0x9}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc0042e3d40}, {0xc00346e190, 0x9}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc001560650?, {0xc00346e190?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc0002ffe00) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc001763410?, 0xc0000cefb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc0015c9dc8?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc001763410?, 0x29449fc?}, {0xae73300?, 0xc0000cef80?, 0x2d5dcbd?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 STEP: Destroying namespace "pods-2454" for this suite. 11/26/22 00:23:37.355 Nov 26 00:23:37.395: FAIL: Couldn't delete ns: "pods-2454": Delete "https://34.168.120.117/api/v1/namespaces/pods-2454": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/pods-2454", Err:(*net.OpError)(0xc0034dd950)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc0002ffe00) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc001763370?, 0xc004273fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc001763370?, 0x0?}, {0xae73300?, 0x5?, 0xc003a5c198?}) /usr/local/go/src/reflect/value.go:368 +0xbc
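For context on what this spec measures: the kubelet restarts a crash-looping container with an exponentially growing delay, capped at MaxContainerBackOff (10s initial delay, doubling per restart, 300s cap are the kubelet defaults). The ten-minute time.Sleep in the goroutine dumps above (0x8bb2c97000 ns = 600s) is the spec waiting long enough for the cap to be reached. A rough illustration of the schedule, not kubelet's code:

package sketch

import "time"

// restartDelay approximates the crash-loop back-off schedule the test
// waits out: 10s, 20s, 40s, ..., capped at 5 minutes. With these
// constants the cap is first hit on the sixth restart (10s * 2^5 = 320s).
func restartDelay(restartCount int) time.Duration {
	const (
		initialDelay        = 10 * time.Second
		maxContainerBackOff = 300 * time.Second
	)
	d := initialDelay
	for i := 1; i < restartCount; i++ {
		d *= 2
		if d > maxContainerBackOff {
			return maxContainerBackOff
		}
	}
	return d
}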
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-node\]\sPods\sshould\shave\stheir\sauto\-restart\sback\-off\stimer\sreset\son\simage\supdate\s\[Slow\]\[NodeConformance\]$'
test/e2e/common/node/pods.go:129 k8s.io/kubernetes/test/e2e/common/node.getRestartDelay(0xc000f80ee8, {0x75f4694, 0x12}, {0x75c2b3f, 0x8}) test/e2e/common/node/pods.go:129 +0x225 k8s.io/kubernetes/test/e2e/common/node.startPodAndGetBackOffs(0x6aba880?, 0xc002549680, 0x75b7ada?) test/e2e/common/node/pods.go:109 +0x1d0 k8s.io/kubernetes/test/e2e/common/node.glob..func15.9() test/e2e/common/node/pods.go:695 +0x336 There were additional failures detected after the initial failure: [FAILED] Nov 26 00:02:57.409: failed to list events in namespace "pods-2818": Get "https://34.168.120.117/api/v1/namespaces/pods-2818/events": dial tcp 34.168.120.117:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 00:02:57.449: Couldn't delete ns: "pods-2818": Delete "https://34.168.120.117/api/v1/namespaces/pods-2818": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/pods-2818", Err:(*net.OpError)(0xc00448fc20)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370
from junit_01.xml
[BeforeEach] [sig-node] Pods set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:00:40.218 Nov 26 00:00:40.218: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename pods 11/26/22 00:00:40.219 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 00:00:40.363 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 00:00:40.454 [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-node] Pods test/e2e/common/node/pods.go:194 [It] should have their auto-restart back-off timer reset on image update [Slow][NodeConformance] test/e2e/common/node/pods.go:676 Nov 26 00:00:40.598: INFO: Waiting up to 5m0s for pod "pod-back-off-image" in namespace "pods-2818" to be "running and ready" Nov 26 00:00:40.641: INFO: Pod "pod-back-off-image": Phase="Pending", Reason="", readiness=false. Elapsed: 42.639295ms Nov 26 00:00:40.641: INFO: The phase of Pod pod-back-off-image is Pending, waiting for it to be Running (with Ready = true) Nov 26 00:00:42.684: INFO: Pod "pod-back-off-image": Phase="Running", Reason="", readiness=true. Elapsed: 2.086107457s Nov 26 00:00:42.685: INFO: The phase of Pod pod-back-off-image is Running (Ready = true) Nov 26 00:00:42.685: INFO: Pod "pod-back-off-image" satisfied condition "running and ready" STEP: getting restart delay-0 11/26/22 00:01:42.729 Nov 26 00:01:43.774: INFO: Container's last state is not "Terminated". Nov 26 00:02:27.642: INFO: getRestartDelay: restartCount = 4, finishedAt=2022-11-26 00:01:41 +0000 UTC restartedAt=2022-11-26 00:02:26 +0000 UTC (45s) STEP: getting restart delay-1 11/26/22 00:02:27.642 Nov 26 00:02:57.329: INFO: Unexpected error: getting pod pod-back-off-image: <*url.Error | 0xc0050a6540>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/pods-2818/pods/pod-back-off-image", Err: <*net.OpError | 0xc0031b96d0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc004589a10>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc000d4ca40>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:02:57.329: FAIL: getting pod pod-back-off-image: Get "https://34.168.120.117/api/v1/namespaces/pods-2818/pods/pod-back-off-image": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/common/node.getRestartDelay(0xc000f80ee8, {0x75f4694, 0x12}, {0x75c2b3f, 0x8}) test/e2e/common/node/pods.go:129 +0x225 k8s.io/kubernetes/test/e2e/common/node.startPodAndGetBackOffs(0x6aba880?, 0xc002549680, 0x75b7ada?) test/e2e/common/node/pods.go:109 +0x1d0 k8s.io/kubernetes/test/e2e/common/node.glob..func15.9() test/e2e/common/node/pods.go:695 +0x336 [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 Nov 26 00:02:57.329: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:02:57.369 STEP: Collecting events from namespace "pods-2818". 
11/26/22 00:02:57.369 Nov 26 00:02:57.409: INFO: Unexpected error: failed to list events in namespace "pods-2818": <*url.Error | 0xc0050a69c0>: { Op: "Get", URL: "https://34.168.120.117/api/v1/namespaces/pods-2818/events", Err: <*net.OpError | 0xc0031b9900>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc002ec6630>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 120, 117], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc000d4cda0>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 00:02:57.409: FAIL: failed to list events in namespace "pods-2818": Get "https://34.168.120.117/api/v1/namespaces/pods-2818/events": dial tcp 34.168.120.117:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc001aba5c0, {0xc002dd7b00, 0x9}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc0044f7520}, {0xc002dd7b00, 0x9}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc001aba650?, {0xc002dd7b00?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc000376ff0) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc0001ec4b0?, 0xc0000cefb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc003450088?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc0001ec4b0?, 0x29449fc?}, {0xae73300?, 0xc0000cef80?, 0x0?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 STEP: Destroying namespace "pods-2818" for this suite. 11/26/22 00:02:57.409 Nov 26 00:02:57.449: FAIL: Couldn't delete ns: "pods-2818": Delete "https://34.168.120.117/api/v1/namespaces/pods-2818": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/pods-2818", Err:(*net.OpError)(0xc00448fc20)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc000376ff0) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc0001ec290?, 0xc001698fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc0001ec290?, 0x0?}, {0xae73300?, 0x5?, 0xc000e1d1a0?}) /usr/local/go/src/reflect/value.go:368 +0xbc
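The earlier "getRestartDelay: restartCount = 4, finishedAt=... restartedAt=... (45s)" line is the measured gap between the previous termination and the next start, read from the container's status. A minimal sketch of that computation (single-container pod assumed; the helper name is ours):

package sketch

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
)

// observedRestartDelay returns how long the kubelet waited between the
// last termination and the current start of the pod's only container.
func observedRestartDelay(pod *corev1.Pod) (time.Duration, error) {
	if len(pod.Status.ContainerStatuses) == 0 {
		return 0, fmt.Errorf("no container statuses for pod %q yet", pod.Name)
	}
	st := pod.Status.ContainerStatuses[0]
	if st.LastTerminationState.Terminated == nil || st.State.Running == nil {
		// Matches the log's "Container's last state is not "Terminated"."
		return 0, fmt.Errorf("container's last state is not %q", "Terminated")
	}
	finished := st.LastTerminationState.Terminated.FinishedAt.Time
	restarted := st.State.Running.StartedAt.Time
	return restarted.Sub(finished), nil
}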
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-node\]\sVariable\sExpansion\sshould\sfail\ssubstituting\svalues\sin\sa\svolume\ssubpath\swith\sabsolute\spath\s\[Slow\]\s\[Conformance\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000377680) test/e2e/framework/framework.go:241 +0x96f
from junit_01.xml
[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:20:07.555 Nov 26 00:20:07.555: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename var-expansion 11/26/22 00:20:07.556
Nov 26 00:20:07.596: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused
[... the same "connection refused" POST failure repeated every ~2s through 00:20:37.675 ...]
Nov 26 00:20:37.675: INFO: Unexpected error: <*errors.errorString | 0xc000209d20>: { s: "timed out waiting for the condition", } Nov 26 00:20:37.675: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000377680) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 Nov 26 00:20:37.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:20:37.715 [DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-node\]\sVariable\sExpansion\sshould\sfail\ssubstituting\svalues\sin\sa\svolume\ssubpath\swith\sbackticks\s\[Slow\]\s\[Conformance\]$'
test/e2e/framework/framework.go:241 k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000381590) test/e2e/framework/framework.go:241 +0x96f
from junit_01.xml
[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:04:50.774 Nov 26 00:04:50.774: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename var-expansion 11/26/22 00:04:50.776
Nov 26 00:04:50.816: INFO: Unexpected error while creating namespace: Post "https://34.168.120.117/api/v1/namespaces": dial tcp 34.168.120.117:443: connect: connection refused
[... the same "connection refused" POST failure repeated every ~2s through 00:05:20.895 ...]
Nov 26 00:05:20.895: INFO: Unexpected error: <*errors.errorString | 0xc00011dd80>: { s: "timed out waiting for the condition", } Nov 26 00:05:20.895: FAIL: timed out waiting for the condition Full Stack Trace 
k8s.io/kubernetes/test/e2e/framework.(*Framework).BeforeEach(0xc000381590) test/e2e/framework/framework.go:241 +0x96f [AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 Nov 26 00:05:20.896: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 00:05:20.936 [DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193
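Neither Variable Expansion spec gets past namespace creation in this run, but for context: both exercise value substitution in a volume subpath via the VolumeMount's SubPathExpr field. An illustrative pod (ours, not the test's exact fixture); the specs above substitute an absolute path or a backtick value into the expression and expect the pod to fail:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// subPathExprPod mounts an emptyDir under a subpath expanded from an
// environment variable. Substituting "/absolute" or a backtick string
// for "$(POD_NAME)" is what the two [Slow] specs above expect to fail.
func subPathExprPod() *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "var-expansion-demo"},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			Containers: []corev1.Container{{
				Name:    "demo",
				Image:   "busybox",
				Command: []string{"sh", "-c", "sleep 3600"},
				Env:     []corev1.EnvVar{{Name: "POD_NAME", Value: "var-expansion-demo"}},
				VolumeMounts: []corev1.VolumeMount{{
					Name:        "workdir",
					MountPath:   "/volume_mount",
					SubPathExpr: "$(POD_NAME)",
				}},
			}},
			Volumes: []corev1.Volume{{
				Name:         "workdir",
				VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
			}},
		},
	}
}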
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-node\]\sVariable\sExpansion\sshould\sverify\sthat\sa\sfailing\ssubpath\sexpansion\scan\sbe\smodified\sduring\sthe\slifecycle\sof\sa\scontainer\s\[Slow\]\s\[Conformance\]$'
test/e2e/framework/pod/pod_client.go:134 k8s.io/kubernetes/test/e2e/framework/pod.(*PodClient).Update(0xc0017bd710?, {0xc002f9b7c0?, 0x32?}, 0x78958b0?) test/e2e/framework/pod/pod_client.go:134 +0xd5 k8s.io/kubernetes/test/e2e/common/node.glob..func7.7() test/e2e/common/node/expansion.go:272 +0x3e6 There were additional failures detected after the initial failure: [FAILED] Nov 26 00:02:59.165: failed to list events in namespace "var-expansion-1826": Get "https://34.168.120.117/api/v1/namespaces/var-expansion-1826/events": dial tcp 34.168.120.117:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 00:02:59.204: Couldn't delete ns: "var-expansion-1826": Delete "https://34.168.120.117/api/v1/namespaces/var-expansion-1826": dial tcp 34.168.120.117:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.120.117/api/v1/namespaces/var-expansion-1826", Err:(*net.OpError)(0xc003108500)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370
[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 00:01:46.122 Nov 26 00:01:46.122: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename var-expansion 11/26/22 00:01:46.124 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 00:01:46.251 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 00:01:46.332 [BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 [It] should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] test/e2e/common/node/expansion.go:225 STEP: creating the pod with failed condition 11/26/22 00:01:46.414 Nov 26 00:01:46.462: INFO: Waiting up to 2m0s for pod "var-expansion-5d5e62ea-c0e8-4fb3-be3d-1c786f246364" in namespace "var-expansion-1826" to be "running"
Nov 26 00:01:46.504: INFO: Pod "var-expansion-5d5e62ea-c0e8-4fb3-be3d-1c786f246364": Phase="Pending", Reason="", readiness=false. Elapsed: 41.190845ms
[... identical ~2s polls elided; the pod stayed Pending from 2s through 28s elapsed ...]
Nov 26 00:02:16.546: INFO: Pod "var-expansion-5d5e62ea-c0e8-4fb3-be3d-1c786f246364": Phase="Pending", Reason="", readiness=false. Elapsed: 30.0834132