PR | nckturner: Webhook framework for cloud controller manager |
Result | FAILURE |
Tests | 21 failed / 76 succeeded |
Started | |
Elapsed | 2h1m |
Revision | |
Builder | b7acb360-bd26-11ed-bb68-063a2dde5b06 |
Refs | master:2bac225e 108838:3321f487
infra-commit | 223d52f1a |
job-version | v1.27.0-alpha.3.190+dcc34e0a318fce |
kubetest-version | v20230222-b5208facd4 |
repo | k8s.io/kubernetes |
repo-commit | dcc34e0a318fcecca2f81497ea268a574d3b4469 |
repos | k8s.io/kubernetes: master:2bac225e4275dae2d01746f500c8b1bd57558455, 108838:3321f4876f1d8b020f083bd3ff8f849ed0262ea3
revision | v1.27.0-alpha.3.190+dcc34e0a318fce |
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-api\-machinery\]\sAdmissionWebhook\s\[Privileged\:ClusterAdmin\]\slisting\smutating\swebhooks\sshould\swork\s\[Conformance\]$'
[FAILED] waiting for webhook configuration to be ready: timed out waiting for the condition
In [It] at: test/e2e/apimachinery/webhook.go:672 @ 03/07/23 21:40:37.93 (from junit_01.xml)
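The timeout above comes from the e2e framework's readiness wait: the webhook is registered with an extra rule scoped to a dedicated marker namespace (the webhook-markers-* namespaces in the dump below), and the test polls by creating throwaway objects there until the webhook starts intercepting them. A rough sketch of that pattern, assuming the marker-namespace approach; names here are illustrative, not the framework's exact code:

```go
package webhookwait

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitWebhookReady returns nil once a create in the marker namespace is
// rejected, i.e. the webhook configuration has propagated and is serving.
// Illustrative sketch; the upstream helper differs in details.
func waitWebhookReady(ctx context.Context, c kubernetes.Interface, markerNS string) error {
	return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		marker := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{GenerateName: "marker-"}}
		created, err := c.CoreV1().ConfigMaps(markerNS).Create(ctx, marker, metav1.CreateOptions{})
		if err != nil {
			// A denial means the admission chain now routes through the
			// webhook: the configuration is ready.
			return true, nil
		}
		// Not intercepted yet: clean up the marker and keep polling.
		_ = c.CoreV1().ConfigMaps(markerNS).Delete(ctx, created.Name, metav1.DeleteOptions{})
		return false, nil
	})
}
```

In this run the poll never sees a denial because the sample-webhook pod itself never becomes ready, as the events below show.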
> Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 21:39:23.964 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:39:23.965 Mar 7 21:39:23.965: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename webhook - test/e2e/framework/framework.go:250 @ 03/07/23 21:39:23.966 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:39:23.979 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:39:23.983 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 21:39:23.987 (22ms) > Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:39:23.987 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:39:23.987 (0s) > Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:91 @ 03/07/23 21:39:23.987 STEP: Setting up server cert - test/e2e/apimachinery/webhook.go:99 @ 03/07/23 21:39:24.001 STEP: Create role binding to let webhook read extension-apiserver-authentication - test/e2e/apimachinery/webhook.go:709 @ 03/07/23 21:39:24.605 STEP: Deploying the webhook pod - test/e2e/apimachinery/webhook.go:741 @ 03/07/23 21:39:24.612 STEP: Wait for the deployment to be ready - test/e2e/apimachinery/webhook.go:811 @ 03/07/23 21:39:24.624 Mar 7 21:39:24.631: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set Mar 7 21:39:26.642: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:39:28.645: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" is progressing."}}, 
CollisionCount:(*int32)(nil)} Mar 7 21:39:30.646: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:39:32.645: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} Mar 7 21:39:34.646: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} Mar 7 21:39:36.645: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, 
CollisionCount:(*int32)(nil)} Mar 7 21:39:38.646: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} Mar 7 21:39:40.646: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 24, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 39, 32, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} STEP: Deploying the webhook service - test/e2e/apimachinery/webhook.go:817 @ 03/07/23 21:39:42.646 STEP: Verifying the service has paired with the endpoint - test/e2e/apimachinery/webhook.go:840 @ 03/07/23 21:39:42.653 Mar 7 21:39:43.653: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:91 @ 03/07/23 21:39:43.657 (19.67s) > Enter [It] listing mutating webhooks should work [Conformance] - test/e2e/apimachinery/webhook.go:645 @ 03/07/23 21:39:43.657 STEP: Listing all of the created validation webhooks - test/e2e/apimachinery/webhook.go:665 @ 03/07/23 21:39:43.728 Mar 7 21:39:43.756: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:43.883: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:43.982: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:44.081: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:44.181: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:54.512: INFO: Waiting for webhook configuration to be ready... Mar 7 21:40:04.811: INFO: Waiting for webhook configuration to be ready... Mar 7 21:40:07.961: INFO: Waiting for webhook configuration to be ready... Mar 7 21:40:08.082: INFO: Waiting for webhook configuration to be ready... Mar 7 21:40:08.182: INFO: Waiting for webhook configuration to be ready... Mar 7 21:40:08.280: INFO: Waiting for webhook configuration to be ready... Mar 7 21:40:08.382: INFO: Waiting for webhook configuration to be ready... Mar 7 21:40:27.696: INFO: Waiting for webhook configuration to be ready... 
Mar 7 21:40:37.930: INFO: Waiting for webhook configuration to be ready... Mar 7 21:40:37.930: INFO: Unexpected error: waiting for webhook configuration to be ready: <*errors.errorString | 0xaaf3910>: { s: "timed out waiting for the condition", } [FAILED] waiting for webhook configuration to be ready: timed out waiting for the condition In [It] at: test/e2e/apimachinery/webhook.go:672 @ 03/07/23 21:40:37.93 < Exit [It] listing mutating webhooks should work [Conformance] - test/e2e/apimachinery/webhook.go:645 @ 03/07/23 21:40:37.93 (54.273s) > Enter [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:40:37.93 Mar 7 21:40:37.930: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:40:37.933 (3ms) > Enter [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:106 @ 03/07/23 21:40:37.933 < Exit [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:106 @ 03/07/23 21:40:37.967 (34ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:40:37.967 < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:40:37.967 (0s) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 21:40:37.967 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:40:37.967 STEP: Collecting events from namespace "webhook-2839". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:40:37.967 STEP: Found 10 events. - test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:40:37.971 Mar 7 21:40:37.971: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for sample-webhook-deployment-7497495989-7dwj9: { } Scheduled: Successfully assigned webhook-2839/sample-webhook-deployment-7497495989-7dwj9 to 172.17.0.1 Mar 7 21:40:37.971: INFO: At 2023-03-07 21:39:24 +0000 UTC - event for sample-webhook-deployment: {deployment-controller } ScalingReplicaSet: Scaled up replica set sample-webhook-deployment-7497495989 to 1 Mar 7 21:40:37.971: INFO: At 2023-03-07 21:39:24 +0000 UTC - event for sample-webhook-deployment-7497495989: {replicaset-controller } SuccessfulCreate: Created pod: sample-webhook-deployment-7497495989-7dwj9 Mar 7 21:40:37.971: INFO: At 2023-03-07 21:39:27 +0000 UTC - event for sample-webhook-deployment-7497495989-7dwj9: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 7 21:40:37.971: INFO: At 2023-03-07 21:39:27 +0000 UTC - event for sample-webhook-deployment-7497495989-7dwj9: {kubelet 172.17.0.1} Created: Created container sample-webhook Mar 7 21:40:37.971: INFO: At 2023-03-07 21:39:27 +0000 UTC - event for sample-webhook-deployment-7497495989-7dwj9: {kubelet 172.17.0.1} Started: Started container sample-webhook Mar 7 21:40:37.971: INFO: At 2023-03-07 21:39:28 +0000 UTC - event for sample-webhook-deployment-7497495989-7dwj9: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Mar 7 21:40:37.971: INFO: At 2023-03-07 21:39:31 +0000 UTC - event for sample-webhook-deployment-7497495989-7dwj9: {kubelet 172.17.0.1} Unhealthy: Readiness probe failed: Get "https://10.88.6.112:8444/readyz": dial tcp 10.88.6.112:8444: connect: connection refused Mar 7 21:40:37.971: INFO: At 2023-03-07 21:39:33 +0000 UTC - event for sample-webhook-deployment-7497495989-7dwj9: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container sample-webhook in pod sample-webhook-deployment-7497495989-7dwj9_webhook-2839(645437e4-dc17-4a8c-8faf-01eac4845d38) Mar 7 21:40:37.971: INFO: At 2023-03-07 21:39:43 +0000 UTC - event for sample-webhook-deployment-7497495989-7dwj9: {kubelet 172.17.0.1} Unhealthy: Readiness probe failed: Get "https://10.88.6.118:8444/readyz": dial tcp 10.88.6.118:8444: connect: connection refused Mar 7 21:40:37.973: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:40:37.973: INFO: Mar 7 21:40:37.976: INFO: Logging node info for node 172.17.0.1 Mar 7 21:40:37.978: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 5497 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:38:19 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 
UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:40:37.979: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:40:37.982: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:40:37.987: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:40:37.987: INFO: Container coredns ready: false, restart count 12 Mar 7 21:40:38.037: INFO: Latency metrics for node 172.17.0.1 STEP: Collecting events from namespace "webhook-markers-6351". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:40:38.037 STEP: Found 0 events. 
- test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:40:38.04 Mar 7 21:40:38.042: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:40:38.042: INFO: Mar 7 21:40:38.045: INFO: Logging node info for node 172.17.0.1 Mar 7 21:40:38.047: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 5497 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:38:19 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 
(bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:40:38.047: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:40:38.050: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:40:38.054: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:40:38.054: INFO: Container coredns ready: false, restart count 12 Mar 7 21:40:38.083: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:40:38.083 (117ms) < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 21:40:38.084 (117ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 21:40:38.084 STEP: Destroying namespace "webhook-2839" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 21:40:38.084 STEP: Destroying namespace "webhook-markers-6351" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 21:40:38.089 < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 21:40:38.094 (10ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:40:38.094 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:40:38.094 (0s)
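The events in this dump show the sample-webhook container crash-looping: the readiness probe on :8444/readyz is refused, the sandbox is recreated, and kubelet backs off restarting the container. To capture why it crashed, the logs of the previous (failed) container instance are what matters; a minimal client-go sketch, using the pod and container names from this dump:

```go
package webhooklogs

import (
	"context"
	"io"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// dumpPreviousLogs streams the logs of the container instance that crashed,
// rather than the freshly restarted one.
func dumpPreviousLogs(ctx context.Context, c kubernetes.Interface, ns, pod string) error {
	req := c.CoreV1().Pods(ns).GetLogs(pod, &corev1.PodLogOptions{
		Container: "sample-webhook",
		Previous:  true, // logs from the crashed instance, not the current one
	})
	stream, err := req.Stream(ctx)
	if err != nil {
		return err
	}
	defer stream.Close()
	_, err = io.Copy(os.Stdout, stream)
	return err
}
```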
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-api\-machinery\]\sAdmissionWebhook\s\[Privileged\:ClusterAdmin\]\slisting\svalidating\swebhooks\sshould\swork\s\[Conformance\]$'
[FAILED] waiting for webhook configuration to be ready: timed out waiting for the condition
In [It] at: test/e2e/apimachinery/webhook.go:598 @ 03/07/23 21:16:58.041 (from junit_01.xml)
> Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 21:16:20.87 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:16:20.871 Mar 7 21:16:20.871: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename webhook - test/e2e/framework/framework.go:250 @ 03/07/23 21:16:20.872 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:16:20.886 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:16:20.89 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 21:16:20.894 (24ms) > Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:16:20.894 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:16:20.894 (0s) > Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:91 @ 03/07/23 21:16:20.894 STEP: Setting up server cert - test/e2e/apimachinery/webhook.go:99 @ 03/07/23 21:16:20.913 STEP: Create role binding to let webhook read extension-apiserver-authentication - test/e2e/apimachinery/webhook.go:709 @ 03/07/23 21:16:21.208 STEP: Deploying the webhook pod - test/e2e/apimachinery/webhook.go:741 @ 03/07/23 21:16:21.22 STEP: Wait for the deployment to be ready - test/e2e/apimachinery/webhook.go:811 @ 03/07/23 21:16:21.234 Mar 7 21:16:21.242: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set Mar 7 21:16:23.253: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 16, 21, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 16, 21, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 16, 21, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 16, 21, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" is progressing."}}, CollisionCount:(*int32)(nil)} STEP: Deploying the webhook service - test/e2e/apimachinery/webhook.go:817 @ 03/07/23 21:16:25.259 STEP: Verifying the service has paired with the endpoint - test/e2e/apimachinery/webhook.go:840 @ 03/07/23 21:16:25.267 Mar 7 21:16:26.267: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:91 @ 03/07/23 21:16:26.271 (5.377s) > Enter [It] listing validating webhooks should work [Conformance] - test/e2e/apimachinery/webhook.go:571 @ 03/07/23 21:16:26.271 STEP: Listing all of the created validation webhooks - test/e2e/apimachinery/webhook.go:591 @ 03/07/23 21:16:26.335 Mar 7 21:16:26.359: INFO: Waiting for webhook configuration to be ready... 
Mar 7 21:16:36.484: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:37.592: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:38.709: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:39.796: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:40.885: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:42.004: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:42.084: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:42.184: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:42.281: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:42.381: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:42.486: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:52.586: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:53.684: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:54.808: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:55.899: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:57.013: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:58.041: INFO: Waiting for webhook configuration to be ready... Mar 7 21:16:58.041: INFO: Unexpected error: waiting for webhook configuration to be ready: <*errors.errorString | 0xaaf3910>: { s: "timed out waiting for the condition", } [FAILED] waiting for webhook configuration to be ready: timed out waiting for the condition In [It] at: test/e2e/apimachinery/webhook.go:598 @ 03/07/23 21:16:58.041 < Exit [It] listing validating webhooks should work [Conformance] - test/e2e/apimachinery/webhook.go:571 @ 03/07/23 21:16:58.041 (31.77s) > Enter [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:16:58.041 Mar 7 21:16:58.041: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:16:58.044 (3ms) > Enter [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:106 @ 03/07/23 21:16:58.044 < Exit [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:106 @ 03/07/23 21:16:58.073 (29ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:16:58.073 < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:16:58.073 (0s) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 21:16:58.073 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:16:58.073 STEP: Collecting events from namespace "webhook-4646". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:16:58.073 STEP: Found 11 events. 
- test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:16:58.078 Mar 7 21:16:58.078: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for sample-webhook-deployment-7497495989-qcf7b: { } Scheduled: Successfully assigned webhook-4646/sample-webhook-deployment-7497495989-qcf7b to 172.17.0.1 Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:21 +0000 UTC - event for sample-webhook-deployment: {deployment-controller } ScalingReplicaSet: Scaled up replica set sample-webhook-deployment-7497495989 to 1 Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:21 +0000 UTC - event for sample-webhook-deployment-7497495989: {replicaset-controller } SuccessfulCreate: Created pod: sample-webhook-deployment-7497495989-qcf7b Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:23 +0000 UTC - event for sample-webhook-deployment-7497495989-qcf7b: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:23 +0000 UTC - event for sample-webhook-deployment-7497495989-qcf7b: {kubelet 172.17.0.1} Created: Created container sample-webhook Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:23 +0000 UTC - event for sample-webhook-deployment-7497495989-qcf7b: {kubelet 172.17.0.1} Started: Started container sample-webhook Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:25 +0000 UTC - event for sample-webhook-deployment-7497495989-qcf7b: {kubelet 172.17.0.1} Unhealthy: Readiness probe failed: Get "https://10.88.1.79:8444/readyz": dial tcp 10.88.1.79:8444: connect: connection refused Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:26 +0000 UTC - event for sample-webhook-deployment-7497495989-qcf7b: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:29 +0000 UTC - event for sample-webhook-deployment-7497495989-qcf7b: {kubelet 172.17.0.1} Unhealthy: Readiness probe failed: Get "https://10.88.1.81:8444/readyz": dial tcp 10.88.1.81:8444: connect: connection refused Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:32 +0000 UTC - event for sample-webhook-deployment-7497495989-qcf7b: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container sample-webhook in pod sample-webhook-deployment-7497495989-qcf7b_webhook-4646(36ee62ae-fe0a-47c2-b7f2-22b3c0427808) Mar 7 21:16:58.078: INFO: At 2023-03-07 21:16:41 +0000 UTC - event for sample-webhook-deployment-7497495989-qcf7b: {kubelet 172.17.0.1} Unhealthy: Readiness probe failed: Get "https://10.88.1.87:8444/readyz": dial tcp 10.88.1.87:8444: connect: connection refused Mar 7 21:16:58.082: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:16:58.082: INFO: sample-webhook-deployment-7497495989-qcf7b 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:16:21 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:16:42 +0000 UTC ContainersNotReady containers with unready status: [sample-webhook]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:16:42 +0000 UTC ContainersNotReady containers with unready status: [sample-webhook]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:16:21 +0000 UTC }] Mar 7 21:16:58.082: INFO: Mar 7 21:16:58.096: INFO: Logging node info for node 172.17.0.1 Mar 7 21:16:58.100: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 2017 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 
kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:12:28 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:12:28 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:12:28 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:12:28 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:12:28 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:16:58.101: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:16:58.106: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:16:58.111: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:16:58.111: INFO: Container coredns ready: false, restart count 8 Mar 7 21:16:58.146: INFO: Latency metrics for node 172.17.0.1 STEP: Collecting events from namespace "webhook-markers-3941". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:16:58.146 STEP: Found 0 events. - test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:16:58.149 Mar 7 21:16:58.152: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:16:58.152: INFO: Mar 7 21:16:58.156: INFO: Logging node info for node 172.17.0.1 Mar 7 21:16:58.160: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 2017 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:12:28 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} 
{<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:12:28 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:12:28 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:12:28 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:12:28 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:16:58.160: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:16:58.164: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:16:58.170: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:16:58.170: INFO: Container coredns ready: false, restart count 8 Mar 7 21:16:58.207: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:16:58.207 (134ms) < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 21:16:58.207 (134ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 21:16:58.207 STEP: Destroying namespace "webhook-4646" for this suite. 
- test/e2e/framework/framework.go:351 @ 03/07/23 21:16:58.207 STEP: Destroying namespace "webhook-markers-3941" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 21:16:58.214 < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 21:16:58.224 (17ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:16:58.224 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:16:58.224 (0s)
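The "Verifying the service has paired with the endpoint" step in these runs is a poll for the e2e-test-webhook Service to expose exactly one ready address. A minimal sketch of that check (illustrative, not the framework's exact helper):

```go
package webhookendpoints

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForEndpoint polls until the webhook Service has one ready endpoint.
// A pod that never passes its readiness probe, as in these runs, stays in
// NotReadyAddresses and never shows up here.
func waitForEndpoint(ctx context.Context, c kubernetes.Interface, ns string) error {
	return wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		ep, err := c.CoreV1().Endpoints(ns).Get(ctx, "e2e-test-webhook", metav1.GetOptions{})
		if err != nil {
			return false, nil // Endpoints object not created yet; keep polling
		}
		ready := 0
		for _, subset := range ep.Subsets {
			ready += len(subset.Addresses) // NotReadyAddresses are excluded
		}
		return ready == 1, nil
	})
}
```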
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-api\-machinery\]\sAdmissionWebhook\s\[Privileged\:ClusterAdmin\]\sshould\sbe\sable\sto\sdeny\spod\sand\sconfigmap\screation\s\[Conformance\]$'
[FAILED] waiting for webhook configuration to be ready: timed out waiting for the condition
In [It] at: test/e2e/apimachinery/webhook.go:878 @ 03/07/23 21:39:23.792 (from junit_01.xml)
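This test fails while registering the deny webhook via the AdmissionRegistration API and waiting for it to take effect. For reference, the registration itself looks roughly like this with admissionregistration/v1 (the service name e2e-test-webhook comes from the logs; the path and port here are illustrative):

```go
package webhookregister

import (
	"context"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// registerDenyWebhook creates a ValidatingWebhookConfiguration that denies
// pod and configmap creation, pointing at the in-cluster webhook service.
// Sketch only; the e2e test's actual rules and names differ.
func registerDenyWebhook(ctx context.Context, c kubernetes.Interface, ns string, caBundle []byte) error {
	fail := admissionregistrationv1.Fail
	none := admissionregistrationv1.SideEffectClassNone
	path := "/always-deny" // illustrative path
	port := int32(8443)    // illustrative service port
	cfg := &admissionregistrationv1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "deny-pod-and-configmap-creation"},
		Webhooks: []admissionregistrationv1.ValidatingWebhook{{
			Name: "deny-unwanted-creates.example.com",
			ClientConfig: admissionregistrationv1.WebhookClientConfig{
				Service: &admissionregistrationv1.ServiceReference{
					Namespace: ns, Name: "e2e-test-webhook", Path: &path, Port: &port,
				},
				CABundle: caBundle,
			},
			Rules: []admissionregistrationv1.RuleWithOperations{{
				Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.Create},
				Rule: admissionregistrationv1.Rule{
					APIGroups:   []string{""},
					APIVersions: []string{"v1"},
					Resources:   []string{"pods", "configmaps"},
				},
			}},
			FailurePolicy:           &fail,
			SideEffects:             &none,
			AdmissionReviewVersions: []string{"v1"},
		}},
	}
	_, err := c.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(ctx, cfg, metav1.CreateOptions{})
	return err
}
```

As in the other two failures, the wait that follows registration times out because the backing webhook pod never becomes ready.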
> Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 21:38:42.953 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:38:42.953 Mar 7 21:38:42.953: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename webhook - test/e2e/framework/framework.go:250 @ 03/07/23 21:38:42.954 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:38:42.963 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:38:42.967 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 21:38:42.97 (17ms) > Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:38:42.97 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:38:42.97 (0s) > Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:91 @ 03/07/23 21:38:42.97 STEP: Setting up server cert - test/e2e/apimachinery/webhook.go:99 @ 03/07/23 21:38:42.986 STEP: Create role binding to let webhook read extension-apiserver-authentication - test/e2e/apimachinery/webhook.go:709 @ 03/07/23 21:38:43.527 STEP: Deploying the webhook pod - test/e2e/apimachinery/webhook.go:741 @ 03/07/23 21:38:43.535 STEP: Wait for the deployment to be ready - test/e2e/apimachinery/webhook.go:811 @ 03/07/23 21:38:43.549 Mar 7 21:38:43.557: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set Mar 7 21:38:45.566: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 38, 43, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 38, 43, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 38, 43, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 38, 43, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" is progressing."}}, CollisionCount:(*int32)(nil)} STEP: Deploying the webhook service - test/e2e/apimachinery/webhook.go:817 @ 03/07/23 21:38:47.57 STEP: Verifying the service has paired with the endpoint - test/e2e/apimachinery/webhook.go:840 @ 03/07/23 21:38:47.578 Mar 7 21:38:48.579: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 21:38:49.579: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 21:38:50.579: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 21:38:51.579: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:91 @ 03/07/23 21:38:51.583 (8.612s) > Enter [It] should be able to deny pod and configmap creation [Conformance] - 
test/e2e/apimachinery/webhook.go:198 @ 03/07/23 21:38:51.583 STEP: Registering the webhook via the AdmissionRegistration API - test/e2e/apimachinery/webhook.go:849 @ 03/07/23 21:38:51.583 Mar 7 21:38:52.620: INFO: Waiting for webhook configuration to be ready... Mar 7 21:38:53.737: INFO: Waiting for webhook configuration to be ready... Mar 7 21:38:54.859: INFO: Waiting for webhook configuration to be ready... Mar 7 21:38:55.946: INFO: Waiting for webhook configuration to be ready... Mar 7 21:38:57.067: INFO: Waiting for webhook configuration to be ready... Mar 7 21:38:58.157: INFO: Waiting for webhook configuration to be ready... Mar 7 21:38:59.242: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:00.366: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:01.449: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:02.538: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:03.660: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:03.731: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:03.830: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:13.933: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:15.050: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:16.138: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:17.257: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:18.345: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:19.433: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:20.554: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:21.642: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:22.761: INFO: Waiting for webhook configuration to be ready... Mar 7 21:39:23.792: INFO: Waiting for webhook configuration to be ready... 
Mar 7 21:39:23.792: INFO: Unexpected error: waiting for webhook configuration to be ready: <*errors.errorString | 0xaaf3910>: { s: "timed out waiting for the condition", } [FAILED] waiting for webhook configuration to be ready: timed out waiting for the condition In [It] at: test/e2e/apimachinery/webhook.go:878 @ 03/07/23 21:39:23.792 < Exit [It] should be able to deny pod and configmap creation [Conformance] - test/e2e/apimachinery/webhook.go:198 @ 03/07/23 21:39:23.792 (32.21s) > Enter [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:39:23.792 Mar 7 21:39:23.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:39:23.796 (4ms) > Enter [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:106 @ 03/07/23 21:39:23.796 < Exit [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:106 @ 03/07/23 21:39:23.835 (39ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:39:23.835 < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:39:23.835 (0s) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 21:39:23.835 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:39:23.835 STEP: Collecting events from namespace "webhook-271". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:39:23.835 STEP: Found 8 events. - test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:39:23.838 Mar 7 21:39:23.838: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for sample-webhook-deployment-7497495989-ltbsc: { } Scheduled: Successfully assigned webhook-271/sample-webhook-deployment-7497495989-ltbsc to 172.17.0.1 Mar 7 21:39:23.838: INFO: At 2023-03-07 21:38:43 +0000 UTC - event for sample-webhook-deployment: {deployment-controller } ScalingReplicaSet: Scaled up replica set sample-webhook-deployment-7497495989 to 1 Mar 7 21:39:23.838: INFO: At 2023-03-07 21:38:43 +0000 UTC - event for sample-webhook-deployment-7497495989: {replicaset-controller } SuccessfulCreate: Created pod: sample-webhook-deployment-7497495989-ltbsc Mar 7 21:39:23.838: INFO: At 2023-03-07 21:38:46 +0000 UTC - event for sample-webhook-deployment-7497495989-ltbsc: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 7 21:39:23.838: INFO: At 2023-03-07 21:38:46 +0000 UTC - event for sample-webhook-deployment-7497495989-ltbsc: {kubelet 172.17.0.1} Created: Created container sample-webhook Mar 7 21:39:23.838: INFO: At 2023-03-07 21:38:46 +0000 UTC - event for sample-webhook-deployment-7497495989-ltbsc: {kubelet 172.17.0.1} Started: Started container sample-webhook Mar 7 21:39:23.838: INFO: At 2023-03-07 21:38:47 +0000 UTC - event for sample-webhook-deployment-7497495989-ltbsc: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Mar 7 21:39:23.838: INFO: At 2023-03-07 21:38:53 +0000 UTC - event for sample-webhook-deployment-7497495989-ltbsc: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container sample-webhook in pod sample-webhook-deployment-7497495989-ltbsc_webhook-271(7eca6328-e3f4-4219-9b30-7be11fac763e) Mar 7 21:39:23.843: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:39:23.843: INFO: sample-webhook-deployment-7497495989-ltbsc 172.17.0.1 Running 0s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:38:43 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:39:03 +0000 UTC ContainersNotReady containers with unready status: [sample-webhook]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:39:03 +0000 UTC ContainersNotReady containers with unready status: [sample-webhook]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:38:43 +0000 UTC }] Mar 7 21:39:23.843: INFO: Mar 7 21:39:23.845: INFO: Unable to fetch webhook-271/sample-webhook-deployment-7497495989-ltbsc/sample-webhook logs: the server could not find the requested resource (get pods sample-webhook-deployment-7497495989-ltbsc) Mar 7 21:39:23.848: INFO: Logging node info for node 172.17.0.1 Mar 7 21:39:23.850: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 5497 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:38:19 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 
21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:39:23.850: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:39:23.853: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:39:23.862: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:39:23.862: INFO: Container coredns ready: false, restart count 12 Mar 7 21:39:23.894: INFO: Latency metrics for node 172.17.0.1 STEP: Collecting events from namespace "webhook-markers-5748". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:39:23.894 STEP: Found 0 events. 
- test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:39:23.898 Mar 7 21:39:23.900: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:39:23.900: INFO: Mar 7 21:39:23.904: INFO: Logging node info for node 172.17.0.1 Mar 7 21:39:23.907: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 5497 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:38:19 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:38:19 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 
(bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:39:23.907: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:39:23.911: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:39:23.916: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:39:23.916: INFO: Container coredns ready: false, restart count 12 Mar 7 21:39:23.951: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:39:23.951 (116ms) < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 21:39:23.951 (116ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 21:39:23.951 STEP: Destroying namespace "webhook-271" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 21:39:23.951 STEP: Destroying namespace "webhook-markers-5748" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 21:39:23.957 < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 21:39:23.961 (11ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:39:23.961 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:39:23.962 (0s)
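The failure above follows directly from the events in the namespace dump: the sample-webhook container started, then the pod sandbox changed and the container went into back-off, the pod flipped to Ready=False at 21:39:03, and the "Waiting for webhook configuration to be ready..." loop exhausted its roughly 30s budget. As a minimal diagnostic sketch (not the framework's own helper), the availability condition can be replayed against the same cluster with client-go; the kubeconfig path, namespace, deployment name, and the 2s/30s cadence below are assumptions taken from this log:

    package main

    import (
        "context"
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Kubeconfig path as printed by the test framework above (assumed).
        cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Poll until the webhook backend Deployment reports an available
        // replica -- roughly the condition the readiness loop depends on.
        err = wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
            d, err := cs.AppsV1().Deployments("webhook-271").Get(context.TODO(),
                "sample-webhook-deployment", metav1.GetOptions{})
            if err != nil {
                return false, nil // transient API error: keep polling
            }
            return d.Status.AvailableReplicas >= 1, nil
        })
        fmt.Println("deployment available:", err == nil)
    }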
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-api\-machinery\]\sAdmissionWebhook\s\[Privileged\:ClusterAdmin\]\sshould\snot\sbe\sable\sto\smutate\sor\sprevent\sdeletion\sof\swebhook\sconfiguration\sobjects\s\[Conformance\]$'
[FAILED] waiting for webhook configuration to be ready: timed out waiting for the condition
In [It] at: test/e2e/apimachinery/webhook.go:1315 @ 03/07/23 22:13:18.284 (from junit_01.xml)
> Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 22:12:01.664 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 22:12:01.664 Mar 7 22:12:01.664: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename webhook - test/e2e/framework/framework.go:250 @ 03/07/23 22:12:01.665 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 22:12:01.679 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 22:12:01.684 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 22:12:01.687 (24ms) > Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 22:12:01.687 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 22:12:01.687 (0s) > Enter [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:91 @ 03/07/23 22:12:01.687 STEP: Setting up server cert - test/e2e/apimachinery/webhook.go:99 @ 03/07/23 22:12:01.706 STEP: Create role binding to let webhook read extension-apiserver-authentication - test/e2e/apimachinery/webhook.go:709 @ 03/07/23 22:12:02.138 STEP: Deploying the webhook pod - test/e2e/apimachinery/webhook.go:741 @ 03/07/23 22:12:02.146 STEP: Wait for the deployment to be ready - test/e2e/apimachinery/webhook.go:811 @ 03/07/23 22:12:02.158 Mar 7 22:12:02.168: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set Mar 7 22:12:04.179: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 22:12:06.184: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 5, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 6, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 6, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum 
availability."}}, CollisionCount:(*int32)(nil)} Mar 7 22:12:08.186: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 5, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 6, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 6, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} Mar 7 22:12:10.183: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 5, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} Mar 7 22:12:12.182: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 5, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} Mar 7 22:12:14.183: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 5, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum 
availability."}}, CollisionCount:(*int32)(nil)} Mar 7 22:12:16.183: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 5, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} Mar 7 22:12:18.184: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 5, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} Mar 7 22:12:20.183: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 5, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 2, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"sample-webhook-deployment-7497495989\" has successfully progressed."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 22, 12, 10, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)} STEP: Deploying the webhook service - test/e2e/apimachinery/webhook.go:817 @ 03/07/23 22:12:22.183 STEP: Verifying the service has paired with the endpoint - test/e2e/apimachinery/webhook.go:840 @ 03/07/23 22:12:22.192 Mar 7 22:12:23.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:24.193: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:25.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:26.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:27.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:28.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:29.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to 
be 1 Mar 7 22:12:30.193: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:31.193: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:32.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:33.193: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:34.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:35.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:36.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:37.193: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:38.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:39.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:40.193: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:41.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:42.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:43.193: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:44.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:45.193: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 Mar 7 22:12:46.192: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 < Exit [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:91 @ 03/07/23 22:12:46.196 (44.508s) > Enter [It] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] - test/e2e/apimachinery/webhook.go:272 @ 03/07/23 22:12:46.196 STEP: Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API - test/e2e/apimachinery/webhook.go:1264 @ 03/07/23 22:12:46.196 Mar 7 22:12:46.217: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:46.328: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:46.431: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:47.531: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:48.654: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:49.739: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:50.862: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:51.950: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:53.035: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:54.154: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:55.246: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:56.330: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:57.450: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:58.539: INFO: Waiting for webhook configuration to be ready... Mar 7 22:12:59.660: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:00.748: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:01.834: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:02.961: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:04.047: INFO: Waiting for webhook configuration to be ready... 
Mar 7 22:13:05.134: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:06.250: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:07.338: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:08.460: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:09.547: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:10.638: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:11.758: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:12.842: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:13.930: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:15.051: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:16.141: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:17.261: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:18.284: INFO: Waiting for webhook configuration to be ready... Mar 7 22:13:18.284: INFO: Unexpected error: waiting for webhook configuration to be ready: <*errors.errorString | 0xaaf3910>: { s: "timed out waiting for the condition", } [FAILED] waiting for webhook configuration to be ready: timed out waiting for the condition In [It] at: test/e2e/apimachinery/webhook.go:1315 @ 03/07/23 22:13:18.284 < Exit [It] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] - test/e2e/apimachinery/webhook.go:272 @ 03/07/23 22:13:18.284 (32.089s) > Enter [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 22:13:18.284 Mar 7 22:13:18.284: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 22:13:18.288 (4ms) > Enter [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:106 @ 03/07/23 22:13:18.288 < Exit [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:106 @ 03/07/23 22:13:18.321 (33ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 22:13:18.321 < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 22:13:18.321 (0s) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 22:13:18.321 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 22:13:18.321 STEP: Collecting events from namespace "webhook-9909". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 22:13:18.321 STEP: Found 3 events. 
- test/e2e/framework/debug/dump.go:46 @ 03/07/23 22:13:18.326 Mar 7 22:13:18.326: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for sample-webhook-deployment-7497495989-bxz6s: { } Scheduled: Successfully assigned webhook-9909/sample-webhook-deployment-7497495989-bxz6s to 172.17.0.1 Mar 7 22:13:18.326: INFO: At 2023-03-07 22:12:02 +0000 UTC - event for sample-webhook-deployment: {deployment-controller } ScalingReplicaSet: Scaled up replica set sample-webhook-deployment-7497495989 to 1 Mar 7 22:13:18.326: INFO: At 2023-03-07 22:12:02 +0000 UTC - event for sample-webhook-deployment-7497495989: {replicaset-controller } SuccessfulCreate: Created pod: sample-webhook-deployment-7497495989-bxz6s Mar 7 22:13:18.331: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 22:13:18.331: INFO: Mar 7 22:13:18.334: INFO: Logging node info for node 172.17.0.1 Mar 7 22:13:18.338: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 10125 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 22:09:25 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID 
available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 22:13:18.339: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 22:13:18.342: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 22:13:18.348: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 22:13:18.348: INFO: Container coredns ready: false, restart count 19 Mar 7 22:13:18.381: INFO: Latency metrics for node 172.17.0.1 STEP: Collecting events from namespace "webhook-markers-8575". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 22:13:18.381 STEP: Found 0 events. 
- test/e2e/framework/debug/dump.go:46 @ 03/07/23 22:13:18.384 Mar 7 22:13:18.388: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 22:13:18.388: INFO: Mar 7 22:13:18.391: INFO: Logging node info for node 172.17.0.1 Mar 7 22:13:18.393: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 10125 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 22:09:25 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 
(bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 22:13:18.394: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 22:13:18.396: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 22:13:18.401: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 22:13:18.401: INFO: Container coredns ready: false, restart count 19 Mar 7 22:13:18.432: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 22:13:18.432 (111ms) < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 22:13:18.432 (111ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 22:13:18.432 STEP: Destroying namespace "webhook-9909" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 22:13:18.432 STEP: Destroying namespace "webhook-markers-8575" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 22:13:18.439 < Exit [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 22:13:18.445 (13ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 22:13:18.445 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 22:13:18.445 (0s)
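In this run the "Verifying the service has paired with the endpoint" wait alone consumed about 24 seconds before BeforeEach exited, and the webhook-configuration readiness loop then timed out at webhook.go:1315. That pairing wait boils down to counting ready addresses on the service's Endpoints object. A small sketch, continuing with the same clientset cs as above (namespace and service name are the ones from this log; an unready pod's address sits in NotReadyAddresses instead):

    ep, err := cs.CoreV1().Endpoints("webhook-9909").Get(context.TODO(),
        "e2e-test-webhook", metav1.GetOptions{})
    if err == nil {
        ready := 0
        for _, ss := range ep.Subsets {
            ready += len(ss.Addresses) // the test waits for this to reach 1
        }
        fmt.Println("ready endpoint addresses:", ready)
    }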
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-api\-machinery\]\sCustomResourceConversionWebhook\s\[Privileged\:ClusterAdmin\]\sshould\sbe\sable\sto\sconvert\sa\snon\shomogeneous\slist\sof\sCRs\s\[Conformance\]$'
[FAILED] timed out waiting for the condition
In [It] at: test/e2e/apimachinery/crd_conversion_webhook.go:481 @ 03/07/23 21:53:22.275 (from junit_01.xml)
> Enter [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 21:52:31.47 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:52:31.47 Mar 7 21:52:31.470: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename crd-webhook - test/e2e/framework/framework.go:250 @ 03/07/23 21:52:31.471 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:52:31.483 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:52:31.487 < Exit [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:191 @ 03/07/23 21:52:31.491 (21ms) > Enter [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:52:31.491 < Exit [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:52:31.491 (0s) > Enter [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:125 @ 03/07/23 21:52:31.491 STEP: Setting up server cert - test/e2e/apimachinery/crd_conversion_webhook.go:128 @ 03/07/23 21:52:31.491 STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication - test/e2e/apimachinery/crd_conversion_webhook.go:213 @ 03/07/23 21:52:31.86 STEP: Deploying the custom resource conversion webhook pod - test/e2e/apimachinery/crd_conversion_webhook.go:242 @ 03/07/23 21:52:31.867 STEP: Wait for the deployment to be ready - test/e2e/apimachinery/crd_conversion_webhook.go:313 @ 03/07/23 21:52:31.879 Mar 7 21:52:31.887: INFO: deployment "sample-crd-conversion-webhook-deployment" doesn't have the required revision set Mar 7 21:52:33.900: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 52, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 52, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 52, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 52, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-crd-conversion-webhook-deployment-5969648595\" is progressing."}}, CollisionCount:(*int32)(nil)} STEP: Deploying the webhook service - test/e2e/apimachinery/crd_conversion_webhook.go:321 @ 03/07/23 21:52:35.905 STEP: Verifying the service has paired with the endpoint - test/e2e/apimachinery/crd_conversion_webhook.go:344 @ 03/07/23 21:52:35.914 Mar 7 21:52:36.915: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 Mar 7 21:52:37.915: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 Mar 7 21:52:38.915: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 Mar 7 21:52:39.916: INFO: Waiting for amount of 
service:e2e-test-crd-conversion-webhook endpoints to be 1 < Exit [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:125 @ 03/07/23 21:52:39.919 (8.428s) > Enter [It] should be able to convert a non homogeneous list of CRs [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:176 @ 03/07/23 21:52:39.919 Mar 7 21:52:39.919: INFO: >>> kubeConfig: /workspace/.kube/config Mar 7 21:52:43.460: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post "https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": dial tcp 10.0.0.239:9443: connect: connection refused Mar 7 21:52:44.579: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post "https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": dial tcp 10.0.0.239:9443: connect: connection refused Mar 7 21:52:45.668: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post "https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": dial tcp 10.0.0.239:9443: connect: connection refused Mar 7 21:52:46.787: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post "https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": dial tcp 10.0.0.239:9443: connect: connection refused Mar 7 21:52:47.875: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post "https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": dial tcp 10.0.0.239:9443: connect: connection refused Mar 7 21:52:48.995: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post "https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": dial tcp 10.0.0.239:9443: connect: connection refused Mar 7 21:52:50.083: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post "https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": dial tcp 10.0.0.239:9443: connect: connection refused Mar 7 21:52:51.171: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post "https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": dial tcp 10.0.0.239:9443: connect: connection refused Mar 7 21:53:21.265: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post 
"https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Mar 7 21:53:22.275: INFO: error waiting for conversion to succeed during setup: conversion for stable.example.com/v2, Kind=e2e-test-crd-webhook-7288-crd failed: conversion webhook for /, Kind= failed: Post "https://e2e-test-crd-conversion-webhook.crd-webhook-6017.svc:9443/crdconvert?timeout=30s": dial tcp 10.0.0.239:9443: connect: connection refused Mar 7 21:53:22.275: INFO: Unexpected error: <*errors.errorString | 0xaaf3910>: { s: "timed out waiting for the condition", } [FAILED] timed out waiting for the condition In [It] at: test/e2e/apimachinery/crd_conversion_webhook.go:481 @ 03/07/23 21:53:22.275 < Exit [It] should be able to convert a non homogeneous list of CRs [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:176 @ 03/07/23 21:53:22.275 (42.357s) > Enter [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:53:22.276 Mar 7 21:53:22.276: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:53:22.279 (3ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:199 @ 03/07/23 21:53:22.279 < Exit [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:199 @ 03/07/23 21:53:22.794 (515ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:126 @ 03/07/23 21:53:22.794 < Exit [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:126 @ 03/07/23 21:53:22.831 (37ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:53:22.831 < Exit [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:53:22.831 (0s) > Enter [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 21:53:22.831 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:53:22.831 STEP: Collecting events from namespace "crd-webhook-6017". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:53:22.831 STEP: Found 9 events. 
- test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:53:22.834 Mar 7 21:53:22.834: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for sample-crd-conversion-webhook-deployment-5969648595-9fqg2: { } Scheduled: Successfully assigned crd-webhook-6017/sample-crd-conversion-webhook-deployment-5969648595-9fqg2 to 172.17.0.1 Mar 7 21:53:22.834: INFO: At 2023-03-07 21:52:31 +0000 UTC - event for sample-crd-conversion-webhook-deployment: {deployment-controller } ScalingReplicaSet: Scaled up replica set sample-crd-conversion-webhook-deployment-5969648595 to 1 Mar 7 21:53:22.834: INFO: At 2023-03-07 21:52:31 +0000 UTC - event for sample-crd-conversion-webhook-deployment-5969648595: {replicaset-controller } SuccessfulCreate: Created pod: sample-crd-conversion-webhook-deployment-5969648595-9fqg2 Mar 7 21:53:22.834: INFO: At 2023-03-07 21:52:34 +0000 UTC - event for sample-crd-conversion-webhook-deployment-5969648595-9fqg2: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 7 21:53:22.834: INFO: At 2023-03-07 21:52:34 +0000 UTC - event for sample-crd-conversion-webhook-deployment-5969648595-9fqg2: {kubelet 172.17.0.1} Created: Created container sample-crd-conversion-webhook Mar 7 21:53:22.834: INFO: At 2023-03-07 21:52:34 +0000 UTC - event for sample-crd-conversion-webhook-deployment-5969648595-9fqg2: {kubelet 172.17.0.1} Started: Started container sample-crd-conversion-webhook Mar 7 21:53:22.834: INFO: At 2023-03-07 21:52:36 +0000 UTC - event for sample-crd-conversion-webhook-deployment-5969648595-9fqg2: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:53:22.834: INFO: At 2023-03-07 21:52:42 +0000 UTC - event for sample-crd-conversion-webhook-deployment-5969648595-9fqg2: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container sample-crd-conversion-webhook in pod sample-crd-conversion-webhook-deployment-5969648595-9fqg2_crd-webhook-6017(e7c56e46-7c15-4894-af7e-454ff010f8e9) Mar 7 21:53:22.834: INFO: At 2023-03-07 21:52:52 +0000 UTC - event for sample-crd-conversion-webhook-deployment-5969648595-9fqg2: {kubelet 172.17.0.1} Unhealthy: Readiness probe failed: Get "https://10.88.7.251:9444/readyz": dial tcp 10.88.7.251:9444: connect: connection refused Mar 7 21:53:22.840: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:53:22.840: INFO: sample-crd-conversion-webhook-deployment-5969648595-9fqg2 172.17.0.1 Running 0s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:52:31 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:53:16 +0000 UTC ContainersNotReady containers with unready status: [sample-crd-conversion-webhook]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:53:16 +0000 UTC ContainersNotReady containers with unready status: [sample-crd-conversion-webhook]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:52:31 +0000 UTC }] Mar 7 21:53:22.840: INFO: Mar 7 21:53:22.842: INFO: Unable to fetch crd-webhook-6017/sample-crd-conversion-webhook-deployment-5969648595-9fqg2/sample-crd-conversion-webhook logs: the server could not find the requested resource (get pods sample-crd-conversion-webhook-deployment-5969648595-9fqg2) Mar 7 21:53:22.845: INFO: Logging node info for node 172.17.0.1 Mar 7 21:53:22.848: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 7505 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux 
kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:52:46 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:52:46 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:52:46 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:52:46 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:52:46 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e 
registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:53:22.848: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:53:22.851: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:53:22.856: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:53:22.856: INFO: Container coredns ready: false, restart count 15 Mar 7 21:53:22.887: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:53:22.887 (56ms) < Exit [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:209 @ 03/07/23 21:53:22.887 (56ms) > Enter [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 21:53:22.887 STEP: Destroying namespace "crd-webhook-6017" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 21:53:22.887 < Exit [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:206 @ 03/07/23 21:53:22.893 (6ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:53:22.893 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:53:22.893 (0s)
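Note on this failure: the log above shows the conversion-webhook pod's sandbox being killed and recreated, the container going into back-off, and the readiness probe on :9444 getting connection refused, so the service on :9443 never had a ready endpoint and the conversion attempts during setup timed out. Below is a minimal, hypothetical sketch of the same two-step wait, using plain client-go rather than the e2e framework's own helpers: first wait for the webhook Deployment to report a ready replica, then exercise the conversion path with a read at the converted version. The GVR plural and CR name are placeholders inferred from the log.

```go
// Hypothetical sketch (plain client-go, not the e2e framework's helpers):
// wait for the conversion-webhook Deployment from the log above to become
// ready, then trigger a conversion with a read at the converted version.
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	dyn := dynamic.NewForConfigOrDie(cfg)

	ns, name := "crd-webhook-6017", "sample-crd-conversion-webhook-deployment"

	// Step 1: the run above never got past this point -- the readiness probe
	// on :9444 was refused after the sandbox restart, so ReadyReplicas stayed 0.
	err = wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		d, getErr := cs.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if getErr != nil {
			return false, nil // tolerate transient API errors and retry
		}
		return d.Status.ReadyReplicas > 0, nil
	})
	if err != nil {
		panic(fmt.Errorf("conversion webhook never became ready: %w", err))
	}

	// Step 2: a GET at v2 makes the apiserver convert objects stored at the
	// other version via the webhook on :9443. Placeholder GVR/name; assumes a
	// cluster-scoped CRD -- add .Namespace(...) if the CRD is namespaced.
	gvr := schema.GroupVersionResource{Group: "stable.example.com", Version: "v2", Resource: "e2e-test-crd-webhook-7288-crds"}
	if _, err := dyn.Resource(gvr).Get(context.TODO(), "placeholder-cr", metav1.GetOptions{}); err != nil {
		fmt.Println("conversion read failed:", err)
	}
}
```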
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sCronJob\sshould\sreplace\sjobs\swhen\sReplaceConcurrent\s\[Conformance\]$'
[FAILED] Failed to replace CronJob replace-27970451 in namespace cronjob-5668: more than one job is running [{TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27970451 GenerateName: Namespace:cronjob-5668 SelfLink: UID:e923a178-00b9-4441-b71b-95c79aec448d ResourceVersion:11269 Generation:1 CreationTimestamp:2023-03-07 22:11:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:e923a178-00b9-4441-b71b-95c79aec448d job-name:replace-27970451] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:47a7dec4-6f28-4bd5-b159-0fd0eb0706c4 Controller:0xc0030503b8 BlockOwnerDeletion:0xc0030503b9}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:11:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"47a7dec4-6f28-4bd5-b159-0fd0eb0706c4\"}":{}}},"f:spec":{"f:backoffLimit":{},"f:completionMode":{},"f:completions":{},"f:parallelism":{},"f:suspend":{},"f:template":{"f:spec":{"f:containers":{"k:{\"name\":\"c\"}":{".":{},"f:command":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{},"f:volumeMounts":{".":{},"k:{\"mountPath\":\"/data\"}":{".":{},"f:mountPath":{},"f:name":{}}}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:volumes":{".":{},"k:{\"name\":\"data\"}":{".":{},"f:emptyDir":{},"f:name":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:11:08 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:status":{"f:conditions":{},"f:failed":{},"f:ready":{},"f:startTime":{},"f:uncountedTerminatedPods":{}}} Subresource:status}]} Spec:{Parallelism:0xc0030503f0 Completions:0xc0030503f4 ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc003050478 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: e923a178-00b9-4441-b71b-95c79aec448d,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:e923a178-00b9-4441-b71b-95c79aec448d job-name:replace-27970451] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil TerminationMessagePath:/dev/termination-log TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent 
SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc003050470 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0014fe2f0 Suspend:0xc00305049a} Status:{Conditions:[{Type:Failed Status:True LastProbeTime:2023-03-07 22:11:08 +0000 UTC LastTransitionTime:2023-03-07 22:11:08 +0000 UTC Reason:BackoffLimitExceeded Message:Job has reached the specified backoff limit}] StartTime:2023-03-07 22:11:00 +0000 UTC CompletionTime:<nil> Active:0 Succeeded:0 Failed:1 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc0030504ac}} {TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27970452 GenerateName: Namespace:cronjob-5668 SelfLink: UID:fd0bf32b-886d-4253-8402-8fdc042a8009 ResourceVersion:11341 Generation:1 CreationTimestamp:2023-03-07 22:12:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:fd0bf32b-886d-4253-8402-8fdc042a8009 job-name:replace-27970452] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:47a7dec4-6f28-4bd5-b159-0fd0eb0706c4 Controller:0xc003050518 BlockOwnerDeletion:0xc003050519}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:12:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"47a7dec4-6f28-4bd5-b159-0fd0eb0706c4\"}":{}}},"f:spec":{"f:backoffLimit":{},"f:completionMode":{},"f:completions":{},"f:parallelism":{},"f:suspend":{},"f:template":{"f:spec":{"f:containers":{"k:{\"name\":\"c\"}":{".":{},"f:command":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{},"f:volumeMounts":{".":{},"k:{\"mountPath\":\"/data\"}":{".":{},"f:mountPath":{},"f:name":{}}}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:volumes":{".":{},"k:{\"name\":\"data\"}":{".":{},"f:emptyDir":{},"f:name":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:12:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:status":{"f:active":{},"f:ready":{},"f:startTime":{},"f:uncountedTerminatedPods":{}}} Subresource:status}]} Spec:{Parallelism:0xc003050550 Completions:0xc003050554 ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc0030505d8 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: fd0bf32b-886d-4253-8402-8fdc042a8009,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> 
Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:fd0bf32b-886d-4253-8402-8fdc042a8009 job-name:replace-27970452] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil TerminationMessagePath:/dev/termination-log TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc0030505d0 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0014fe310 Suspend:0xc0030505fa} Status:{Conditions:[] StartTime:2023-03-07 22:12:00 +0000 UTC CompletionTime:<nil> Active:1 Succeeded:0 Failed:0 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc0030505fc}}] In [It] at: test/e2e/apps/cronjob.go:185 @ 03/07/23 22:12:01.578from junit_01.xml
> Enter [BeforeEach] [sig-apps] CronJob - set up framework | framework.go:191 @ 03/07/23 22:10:45.502 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 22:10:45.502 Mar 7 22:10:45.502: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename cronjob - test/e2e/framework/framework.go:250 @ 03/07/23 22:10:45.504 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 22:10:45.523 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 22:10:45.529 < Exit [BeforeEach] [sig-apps] CronJob - set up framework | framework.go:191 @ 03/07/23 22:10:45.539 (36ms) > Enter [BeforeEach] [sig-apps] CronJob - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 22:10:45.539 < Exit [BeforeEach] [sig-apps] CronJob - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 22:10:45.539 (0s) > Enter [It] should replace jobs when ReplaceConcurrent [Conformance] - test/e2e/apps/cronjob.go:161 @ 03/07/23 22:10:45.539 STEP: Creating a ReplaceConcurrent cronjob - test/e2e/apps/cronjob.go:162 @ 03/07/23 22:10:45.539 STEP: Ensuring a job is scheduled - test/e2e/apps/cronjob.go:168 @ 03/07/23 22:10:45.562 STEP: Ensuring exactly one is scheduled - test/e2e/apps/cronjob.go:172 @ 03/07/23 22:11:01.567 STEP: Ensuring exactly one running job exists by listing jobs explicitly - test/e2e/apps/cronjob.go:177 @ 03/07/23 22:11:01.569 STEP: Ensuring the job is replaced with a new one - test/e2e/apps/cronjob.go:183 @ 03/07/23 22:11:01.572 Mar 7 22:12:01.578: INFO: Unexpected error: Failed to replace CronJob replace-27970451 in namespace cronjob-5668: <*errors.errorString | 0xc0014fef30>: { s: "more than one job is running [{TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27970451 GenerateName: Namespace:cronjob-5668 SelfLink: UID:e923a178-00b9-4441-b71b-95c79aec448d ResourceVersion:11269 Generation:1 CreationTimestamp:2023-03-07 22:11:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:e923a178-00b9-4441-b71b-95c79aec448d job-name:replace-27970451] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:47a7dec4-6f28-4bd5-b159-0fd0eb0706c4 Controller:0xc0030503b8 BlockOwnerDeletion:0xc0030503b9}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:11:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{\"f:metadata\":{\"f:ownerReferences\":{\".\":{},\"k:{\\\"uid\\\":\\\"47a7dec4-6f28-4bd5-b159-0fd0eb0706c4\\\"}\":{}}},\"f:spec\":{\"f:backoffLimit\":{},\"f:completionMode\":{},\"f:completions\":{},\"f:parallelism\":{},\"f:suspend\":{},\"f:template\":{\"f:spec\":{\"f:containers\":{\"k:{\\\"name\\\":\\\"c\\\"}\":{\".\":{},\"f:command\":{},\"f:image\":{},\"f:imagePullPolicy\":{},\"f:name\":{},\"f:resources\":{},\"f:terminationMessagePath\":{},\"f:terminationMessagePolicy\":{},\"f:volumeMounts\":{\".\":{},\"k:{\\\"mountPath\\\":\\\"/data\\\"}\":{\".\":{},\"f:mountPath\":{},\"f:name\":{}}}}},\"f:dnsPolicy\":{},\"f:restartPolicy\":{},\"f:schedulerName\":{},\"f:securityContext\":{},\"f:terminationGracePeriodSeconds\":{},\"f:volumes\":{\".\":{},\"k:{\\\"name\\\":\\\"data\\\"}\":{\".\":{},\"f:emptyDir\":{},\"f:name\":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:11:08 +0000 UTC FieldsType:FieldsV1 
FieldsV1:{\"f:status\":{\"f:conditions\":{},\"f:failed\":{},\"f:ready\":{},\"f:startTime\":{},\"f:uncountedTerminatedPods\":{}}} Subresource:status}]} Spec:{Parallelism:0xc0030503f0 Completions:0xc0030503f4 ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc003050478 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: e923a178-00b9-4441-b71b-95c79aec448d,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:e923a178-00b9-4441-b71b-95c79aec448d job-name:replace-27970451] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil TerminationMessagePath:/dev/termination-log TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc003050470 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0014fe2f0 Suspend:0xc00305049a} Status:{Conditions:[{Type:Failed Status:True LastProbeTime:2023-03-07 22:11:08 +0000 UTC LastTransitionTime:2023-03-07 22:11:08 +0000 UTC Reason:BackoffLimitExceeded Message:Job has reached the specified backoff limit}] StartTime:2023-03-07 22:11:00 +0000 UTC CompletionTime:<nil> Active:0 Succeeded:0 Failed:1 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc0030504ac}} {TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27970452 GenerateName: Namespace:cronjob-5668 SelfLink: UID:fd0bf32b-886d-4253-8402-8fdc042a8009 ResourceVersion:11341 Generation:1 CreationTimestamp:2023-03-07 22:12:00 
+0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:fd0bf32b-886d-4253-8402-8fdc042a8009 job-name:replace-27970452] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:47a7dec4-6f28-4bd5-b159-0fd0eb0706c4 Controller:0xc003050518 BlockOwnerDeletion:0xc003050519}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:12:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{\"f:metadata\":{\"f:ownerReferences\":{\".\":{},\"k:{\\\"uid\\\":\\\"47a7dec4-6f28-4bd5-b159-0fd0eb0706c4\\\"}\":{}}},\"f:spec\":{\"f:backoffLimit\":{},\"f:completionMode\":{},\"f:completions\":{},\"f:parallelism\":{},\"f:suspend\":{},\"f:template\":{\"f:spec\":{\"f:containers\":{\"k:{\\\"name\\\":\\\"c\\\"}\":{\".\":{},\"f:command\":{},\"f:image\":{},\"f:imagePullPolicy\":{},\"f:name\":{},\"f:resources\":{},\"f:terminationMessagePath\":{},\"f:terminationMessagePolicy\":{},\"f:volumeMounts\":{\".\":{},\"k:{\\\"mountPath\\\":\\\"/data\\\"}\":{\".\":{},\"f:mountPath\":{},\"f:name\":{}}}}},\"f:dnsPolicy\":{},\"f:restartPolicy\":{},\"f:schedulerName\":{},\"f:securityContext\":{},\"f:terminationGracePeriodSeconds\":{},\"f:volumes\":{\".\":{},\"k:{\\\"name\\\":\\\"data\\\"}\":{\".\":{},\"f:emptyDir\":{},\"f:name\":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:12:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{\"f:status\":{\"f:active\":{},\"f:ready\":{},\"f:startTime\":{},\"f:uncountedTerminatedPods\":{}}} Subresource:status}]} Spec:{Parallelism:0xc003050550 Completions:0xc003050554 ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc0030505d8 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: fd0bf32b-886d-4253-8402-8fdc042a8009,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:fd0bf32b-886d-4253-8402-8fdc042a8009 job-name:replace-27970452] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:... Gomega truncated this representation as it exceeds 'format.MaxLength'. Consider having the object provide a custom 'GomegaStringer' representation or adjust the parameters in Gomega's 'format' package. 
Learn more here: https://onsi.github.io/gomega/#adjusting-output [FAILED] Failed to replace CronJob replace-27970451 in namespace cronjob-5668: more than one job is running [{TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27970451 GenerateName: Namespace:cronjob-5668 SelfLink: UID:e923a178-00b9-4441-b71b-95c79aec448d ResourceVersion:11269 Generation:1 CreationTimestamp:2023-03-07 22:11:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:e923a178-00b9-4441-b71b-95c79aec448d job-name:replace-27970451] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:47a7dec4-6f28-4bd5-b159-0fd0eb0706c4 Controller:0xc0030503b8 BlockOwnerDeletion:0xc0030503b9}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:11:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"47a7dec4-6f28-4bd5-b159-0fd0eb0706c4\"}":{}}},"f:spec":{"f:backoffLimit":{},"f:completionMode":{},"f:completions":{},"f:parallelism":{},"f:suspend":{},"f:template":{"f:spec":{"f:containers":{"k:{\"name\":\"c\"}":{".":{},"f:command":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{},"f:volumeMounts":{".":{},"k:{\"mountPath\":\"/data\"}":{".":{},"f:mountPath":{},"f:name":{}}}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:volumes":{".":{},"k:{\"name\":\"data\"}":{".":{},"f:emptyDir":{},"f:name":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:11:08 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:status":{"f:conditions":{},"f:failed":{},"f:ready":{},"f:startTime":{},"f:uncountedTerminatedPods":{}}} Subresource:status}]} Spec:{Parallelism:0xc0030503f0 Completions:0xc0030503f4 ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc003050478 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: e923a178-00b9-4441-b71b-95c79aec448d,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:e923a178-00b9-4441-b71b-95c79aec448d job-name:replace-27970451] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil 
TerminationMessagePath:/dev/termination-log TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc003050470 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0014fe2f0 Suspend:0xc00305049a} Status:{Conditions:[{Type:Failed Status:True LastProbeTime:2023-03-07 22:11:08 +0000 UTC LastTransitionTime:2023-03-07 22:11:08 +0000 UTC Reason:BackoffLimitExceeded Message:Job has reached the specified backoff limit}] StartTime:2023-03-07 22:11:00 +0000 UTC CompletionTime:<nil> Active:0 Succeeded:0 Failed:1 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc0030504ac}} {TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27970452 GenerateName: Namespace:cronjob-5668 SelfLink: UID:fd0bf32b-886d-4253-8402-8fdc042a8009 ResourceVersion:11341 Generation:1 CreationTimestamp:2023-03-07 22:12:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:fd0bf32b-886d-4253-8402-8fdc042a8009 job-name:replace-27970452] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:47a7dec4-6f28-4bd5-b159-0fd0eb0706c4 Controller:0xc003050518 BlockOwnerDeletion:0xc003050519}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:12:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"47a7dec4-6f28-4bd5-b159-0fd0eb0706c4\"}":{}}},"f:spec":{"f:backoffLimit":{},"f:completionMode":{},"f:completions":{},"f:parallelism":{},"f:suspend":{},"f:template":{"f:spec":{"f:containers":{"k:{\"name\":\"c\"}":{".":{},"f:command":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{},"f:volumeMounts":{".":{},"k:{\"mountPath\":\"/data\"}":{".":{},"f:mountPath":{},"f:name":{}}}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:volumes":{".":{},"k:{\"name\":\"data\"}":{".":{},"f:emptyDir":{},"f:name":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-07 22:12:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:status":{"f:active":{},"f:ready":{},"f:startTime":{},"f:uncountedTerminatedPods":{}}} Subresource:status}]} Spec:{Parallelism:0xc003050550 Completions:0xc003050554 ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc0030505d8 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: 
fd0bf32b-886d-4253-8402-8fdc042a8009,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:fd0bf32b-886d-4253-8402-8fdc042a8009 job-name:replace-27970452] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil TerminationMessagePath:/dev/termination-log TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc0030505d0 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0014fe310 Suspend:0xc0030505fa} Status:{Conditions:[] StartTime:2023-03-07 22:12:00 +0000 UTC CompletionTime:<nil> Active:1 Succeeded:0 Failed:0 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc0030505fc}}] In [It] at: test/e2e/apps/cronjob.go:185 @ 03/07/23 22:12:01.578 < Exit [It] should replace jobs when ReplaceConcurrent [Conformance] - test/e2e/apps/cronjob.go:161 @ 03/07/23 22:12:01.578 (1m16.039s) > Enter [AfterEach] [sig-apps] CronJob - test/e2e/framework/node/init/init.go:33 @ 03/07/23 22:12:01.578 Mar 7 22:12:01.578: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-apps] CronJob - test/e2e/framework/node/init/init.go:33 @ 03/07/23 22:12:01.582 (4ms) > Enter [DeferCleanup (Each)] [sig-apps] CronJob - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 22:12:01.582 < Exit [DeferCleanup (Each)] [sig-apps] CronJob - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 22:12:01.582 (0s) > Enter [DeferCleanup (Each)] [sig-apps] 
CronJob - dump namespaces | framework.go:209 @ 03/07/23 22:12:01.582 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 22:12:01.582 STEP: Collecting events from namespace "cronjob-5668". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 22:12:01.582 STEP: Found 9 events. - test/e2e/framework/debug/dump.go:46 @ 03/07/23 22:12:01.586 Mar 7 22:12:01.586: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for replace-27970451-9p9x6: { } Scheduled: Successfully assigned cronjob-5668/replace-27970451-9p9x6 to 172.17.0.1 Mar 7 22:12:01.586: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for replace-27970452-zn977: { } Scheduled: Successfully assigned cronjob-5668/replace-27970452-zn977 to 172.17.0.1 Mar 7 22:12:01.586: INFO: At 2023-03-07 22:11:00 +0000 UTC - event for replace: {cronjob-controller } SuccessfulCreate: Created job replace-27970451 Mar 7 22:12:01.586: INFO: At 2023-03-07 22:11:00 +0000 UTC - event for replace-27970451: {job-controller } SuccessfulCreate: Created pod: replace-27970451-9p9x6 Mar 7 22:12:01.586: INFO: At 2023-03-07 22:11:08 +0000 UTC - event for replace: {cronjob-controller } SawCompletedJob: Saw completed job: replace-27970451, status: Failed Mar 7 22:12:01.586: INFO: At 2023-03-07 22:11:08 +0000 UTC - event for replace-27970451: {job-controller } SuccessfulDelete: Deleted pod: replace-27970451-9p9x6 Mar 7 22:12:01.586: INFO: At 2023-03-07 22:11:08 +0000 UTC - event for replace-27970451: {job-controller } BackoffLimitExceeded: Job has reached the specified backoff limit Mar 7 22:12:01.586: INFO: At 2023-03-07 22:12:00 +0000 UTC - event for replace: {cronjob-controller } SuccessfulCreate: Created job replace-27970452 Mar 7 22:12:01.586: INFO: At 2023-03-07 22:12:00 +0000 UTC - event for replace-27970452: {job-controller } SuccessfulCreate: Created pod: replace-27970452-zn977 Mar 7 22:12:01.589: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 22:12:01.589: INFO: replace-27970452-zn977 172.17.0.1 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 22:12:00 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 22:12:00 +0000 UTC ContainersNotReady containers with unready status: [c]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 22:12:00 +0000 UTC ContainersNotReady containers with unready status: [c]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 22:12:00 +0000 UTC }] Mar 7 22:12:01.589: INFO: Mar 7 22:12:01.605: INFO: Unable to fetch cronjob-5668/replace-27970452-zn977/c logs: the server rejected our request for an unknown reason (get pods replace-27970452-zn977) Mar 7 22:12:01.608: INFO: Logging node info for node 172.17.0.1 Mar 7 22:12:01.612: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 10125 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 22:09:25 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 
registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 22:12:01.612: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 22:12:01.616: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 22:12:01.623: INFO: replace-27970452-zn977 started at 2023-03-07 22:12:00 +0000 UTC (0+1 container statuses recorded) Mar 7 22:12:01.623: INFO: Container c ready: false, restart count 0 Mar 7 22:12:01.623: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 22:12:01.623: INFO: Container coredns ready: false, restart count 19 Mar 7 22:12:01.657: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 22:12:01.657 (75ms) < Exit [DeferCleanup (Each)] [sig-apps] CronJob - dump namespaces | framework.go:209 @ 03/07/23 22:12:01.657 (75ms) > Enter [DeferCleanup (Each)] [sig-apps] CronJob - tear down framework | framework.go:206 @ 03/07/23 22:12:01.657 STEP: Destroying namespace "cronjob-5668" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 22:12:01.657 < Exit [DeferCleanup (Each)] [sig-apps] CronJob - tear down framework | framework.go:206 @ 03/07/23 22:12:01.662 (5ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 22:12:01.663 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 22:12:01.663 (0s)
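Reading the events above: with concurrencyPolicy: Replace, the controller created replace-27970451, that job hit its backoff limit (BackoffLimitExceeded) and was marked Failed, then replace-27970452 was created on the next tick; the listing at cronjob.go:185 still saw both jobs and failed with "more than one job is running". Below is a rough, hypothetical approximation of that check in plain client-go; the framework's own notion of a "finished" job may differ.

```go
// Rough, hypothetical approximation of the failed check: list the CronJob's
// Jobs in the test namespace and count the ones that are not yet finished.
package main

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// finished reports whether the Job carries a terminal Complete or Failed
// condition. Note: by this definition replace-27970451 above *is* finished
// (it has a Failed=True condition), so a filter that only looked for
// Complete -- or a list racing the status update -- would still count two
// live jobs, which matches the error message.
func finished(j batchv1.Job) bool {
	for _, c := range j.Status.Conditions {
		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == "True" {
			return true
		}
	}
	return false
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	jobs, err := cs.BatchV1().Jobs("cronjob-5668").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	alive := 0
	for _, j := range jobs.Items {
		if !finished(j) {
			alive++
		}
	}
	// With concurrencyPolicy: Replace the test expects at most one live job.
	fmt.Printf("live jobs: %d\n", alive)
}
```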
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sDeployment\sdeployment\sshould\ssupport\sproportional\sscaling\s\[Conformance\]$'
[FAILED] error waiting for deployment "webserver-deployment" status to match expectation: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 26, 52, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} In [It] at: test/e2e/apps/deployment.go:1215 @ 03/07/23 21:27:10.732 from junit_01.xml
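The expanded log that follows shows why the wait at deployment.go:1215 timed out: ReadyReplicas for "webserver-deployment" oscillates (1, 6, 7, 3, 0, 2, 5, 9, 7, 2, 0, ...) and never settles at the 10 desired replicas, which points at pods flapping on the node rather than at the proportional-scaling logic itself. For reference, a hedged sketch of the completeness predicate such a wait polls, in the standard client-go pattern; the namespace below is a hypothetical placeholder, since the real "deployment-NNNN" suffix is not visible in this excerpt.

```go
// Hedged sketch: poll the Deployment until the controller has observed the
// latest generation and every desired replica is updated and available.
package main

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// complete reports whether the Deployment has fully rolled out.
func complete(d *appsv1.Deployment) bool {
	want := int32(1)
	if d.Spec.Replicas != nil {
		want = *d.Spec.Replicas
	}
	return d.Status.ObservedGeneration >= d.Generation &&
		d.Status.Replicas == want &&
		d.Status.UpdatedReplicas == want &&
		d.Status.AvailableReplicas == want
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	ns := "deployment-0000" // placeholder; the real namespace suffix is not shown above
	err = wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		d, getErr := cs.AppsV1().Deployments(ns).Get(context.TODO(), "webserver-deployment", metav1.GetOptions{})
		if getErr != nil {
			return false, getErr
		}
		// In the run below, AvailableReplicas keeps bouncing between 0 and 9,
		// so this condition is never true and the poll eventually times out.
		return complete(d), nil
	})
	if err != nil {
		panic(err)
	}
}
```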
> Enter [BeforeEach] [sig-apps] Deployment - set up framework | framework.go:191 @ 03/07/23 21:21:58.653 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:21:58.653 Mar 7 21:21:58.653: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename deployment - test/e2e/framework/framework.go:250 @ 03/07/23 21:21:58.654 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:21:58.667 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:21:58.671 < Exit [BeforeEach] [sig-apps] Deployment - set up framework | framework.go:191 @ 03/07/23 21:21:58.675 (22ms) > Enter [BeforeEach] [sig-apps] Deployment - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:21:58.675 < Exit [BeforeEach] [sig-apps] Deployment - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:21:58.675 (0s) > Enter [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 @ 03/07/23 21:21:58.675 < Exit [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 @ 03/07/23 21:21:58.675 (0s) > Enter [It] deployment should support proportional scaling [Conformance] - test/e2e/apps/deployment.go:160 @ 03/07/23 21:21:58.675 Mar 7 21:21:58.675: INFO: Creating deployment "webserver-deployment" Mar 7 21:21:58.681: INFO: Waiting for observed generation 1 Mar 7 21:22:00.689: INFO: Waiting for all required pods to come up Mar 7 21:22:00.692: INFO: Pod name httpd: Found 10 pods out of 10 STEP: ensuring each pod is running - test/e2e/framework/pod/resource.go:125 @ 03/07/23 21:22:00.692 Mar 7 21:22:10.721: INFO: Waiting for deployment "webserver-deployment" to complete Mar 7 21:22:10.724: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:9, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 8, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:22:12.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:6, AvailableReplicas:6, UnavailableReplicas:4, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 12, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:22:14.728: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:7, AvailableReplicas:7, UnavailableReplicas:3, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 13, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}, v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 14, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 14, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}}, CollisionCount:(*int32)(nil)}

The poll loop then logged the same status shape every 2s for the next three minutes, with only the replica counts changing. Throughout, Replicas and UpdatedReplicas stayed at 10, AvailableReplicas equaled ReadyReplicas, UnavailableReplicas was 10 minus ReadyReplicas, and the two conditions repeated verbatim: Available=False (Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."; LastTransitionTime 21:22:14, then 21:22:28 for the remainder) and Progressing=True (Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."; LastTransitionTime 21:21:58 throughout, with LastUpdateTime advancing as pods churned: 21:22:13, :22, :24, :26, :46, :47, :53, 21:23:24, :28, 21:24:10, :49, :51, :53, 21:25:01). The one exception was the 21:22:26 poll, which briefly reported Available=True (Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability.") before flipping back at 21:22:28. ReadyReplicas per poll, condensed:

21:22:16 | 3
21:22:18–21:22:20 | 0
21:22:22 | 2
21:22:24 | 5
21:22:26 | 9 (Available=True)
21:22:28 | 7 (Available=False again)
21:22:30 | 2
21:22:32–21:22:44 | 0
21:22:46 | 3
21:22:48 | 6
21:22:50 | 3
21:22:52 | 0
21:22:54–21:22:56 | 1
21:22:58–21:23:26 | 0
21:23:28 | 6
21:23:30 | 2
21:23:32–21:24:48 | 0
21:24:50 | 1
21:24:52 | 2
21:24:54 | 3
21:24:56–21:25:00 | 0
21:25:02 | 1
Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:24:58.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 24, 53, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:00.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 24, 53, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:02.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:9, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:04.732: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:9, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is 
progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:06.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:08.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:10.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:12.727: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:14.729: INFO: deployment 
status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:16.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:18.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:20.730: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:22.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, 
ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:24.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:26.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:28.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:30.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:32.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:34.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:36.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:38.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", 
Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:40.730: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:42.730: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:44.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:46.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:48.727: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:50.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:52.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:54.727: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), 
Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:56.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:25:58.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:00.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:02.727: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum 
availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:04.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:06.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:08.738: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:10.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", 
LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:12.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:14.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:16.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:18.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:20.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:22.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:24.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:26.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), 
Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:28.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:30.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:32.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:34.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is 
progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:36.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:38.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:40.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:42.727: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:44.728: INFO: deployment 
status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:46.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:48.729: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:50.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:10, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 22, 28, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.March, 7, 21, 25, 1, 0, time.Local), LastTransitionTime:time.Date(2023, time.March, 7, 21, 21, 58, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"webserver-deployment-67bd4bf6dc\" is progressing."}}, CollisionCount:(*int32)(nil)} Mar 7 21:26:52.728: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:10, UpdatedReplicas:10, 
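The INFO lines above come from the deployment-wait helper visible in the goroutine dump below (test/utils/deployment.go, driven by wait.PollImmediate). A minimal sketch of that loop, assuming client-go; the 2s interval and 5m timeout are inferred from the timestamps and the "Spec Runtime: 5m0.023s" report below, and the helper name and exact completion predicate are assumptions, not code copied from the repo:

    package e2eutil

    import (
        "context"
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
    )

    // waitForDeploymentComplete re-fetches the deployment every two seconds
    // and logs its status until every replica is available or the timeout
    // expires (hypothetical stand-in for the e2e framework helper).
    func waitForDeploymentComplete(c kubernetes.Interface, ns, name string) error {
        err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
            d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            // This is the line that floods the log above.
            fmt.Printf("deployment status: %#v\n", d.Status)
            // Complete once the controller has seen the latest generation and
            // every desired replica is updated and available.
            done := d.Status.ObservedGeneration >= d.Generation &&
                d.Status.UpdatedReplicas == *d.Spec.Replicas &&
                d.Status.AvailableReplicas == *d.Spec.Replicas
            return done, nil
        })
        if err != nil {
            return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", name, err)
        }
        return nil
    }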
Polling resumed with one more brief flap:

    21:26:52              ReadyReplicas:1   (Progressing LastUpdateTime -> 21:26:52)
    21:26:54 - 21:26:56   ReadyReplicas:0

Five minutes into the spec, Ginkgo emitted its progress report:

Automatically polling progress:
  [sig-apps] Deployment deployment should support proportional scaling [Conformance] (Spec Runtime: 5m0.023s)
    test/e2e/apps/deployment.go:160
    In [It] (Node Runtime: 5m0.001s)
      test/e2e/apps/deployment.go:160
      At [By Step] ensuring each pod is running (Step Runtime: 4m57.983s)
        test/e2e/framework/pod/resource.go:125

  Spec Goroutine
  goroutine 2794 [select]
    k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f24218, 0xc00012c000}, 0xc0043bbb78, 0x2f83f2a?)
      vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:647
    k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f24218, 0xc00012c000}, 0xc8?, 0x2f82c25?, 0x1?)
      vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600
    k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f24218, 0xc00012c000}, 0x0?, 0xc001d67c18?, 0x25944a7?)
      vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534
    k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x1?, 0xc001d67c98?, 0x25940c8?)
      vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:520
    k8s.io/kubernetes/test/utils.waitForDeploymentCompleteMaybeCheckRolling({0x7f62698?, 0xc003e8aea0}, 0xc0005b5400, 0x0, 0x7836178, 0x1?, 0x1?)
      test/utils/deployment.go:82
    k8s.io/kubernetes/test/utils.WaitForDeploymentComplete(...)
      test/utils/deployment.go:201
    k8s.io/kubernetes/test/e2e/framework/deployment.WaitForDeploymentComplete(...)
      test/e2e/framework/deployment/wait.go:46
    > k8s.io/kubernetes/test/e2e/apps.testProportionalScalingDeployment({0x7f07c801c938, 0xc003578980}, 0x7ee9800?)
      test/e2e/apps/deployment.go:1214
        |
        | framework.Logf("Waiting for deployment %q to complete", deployment.Name)
        > err = e2edeployment.WaitForDeploymentComplete(c, deployment)
        | framework.ExpectNoError(err)
        |
    > k8s.io/kubernetes/test/e2e/apps.glob..func5.11({0x7f07c801c938?, 0xc003578980?})
      test/e2e/apps/deployment.go:161
        | */
        | framework.ConformanceIt("deployment should support proportional scaling", func(ctx context.Context) {
        >     testProportionalScalingDeployment(ctx, f)
        | })
        | ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) {
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc003578980})
      vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
      vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
      vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841
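The timeline shows ReadyReplicas repeatedly climbing (to 3/10 at one point) and collapsing back to 0, so the pods behind ReplicaSet "webserver-deployment-67bd4bf6dc" are the place to look before the timeout fires. A hypothetical follow-up, sketched with client-go (the dumpReplicaSetPods helper is ours; only the ReplicaSet name comes from this log, and the namespace is whatever the framework generated for this spec):

    package e2eutil

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // dumpReplicaSetPods lists the pods selected by the named ReplicaSet and
    // prints per-container readiness and restart counts.
    func dumpReplicaSetPods(c kubernetes.Interface, ns, rsName string) error {
        rs, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
        if err != nil {
            return err
        }
        sel, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
        if err != nil {
            return err
        }
        pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: sel.String()})
        if err != nil {
            return err
        }
        for _, p := range pods.Items {
            for _, cs := range p.Status.ContainerStatuses {
                fmt.Printf("%s/%s: ready=%t restarts=%d\n", p.Name, cs.Name, cs.Ready, cs.RestartCount)
            }
        }
        return nil
    }

Readiness that is briefly reached and then lost usually means containers pass their readiness probe and then restart or are evicted, so per-container restart counts are the quickest signal.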
< Exit [It] deployment should support
proportional scaling [Conformance] - test/e2e/apps/deployment.go:160 @ 03/07/23 21:27:10.732 (5m12.057s) > Enter [AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 @ 03/07/23 21:27:10.732 Mar 7 21:27:10.734: INFO: Deployment "webserver-deployment": &Deployment{ObjectMeta:{webserver-deployment deployment-1930 8a525d50-e105-4958-bd9f-9bcec6c03ed3 4277 1 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-03-07 21:26:53 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*10,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[name:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc001f4dd18 <nil> ClusterFirst map[] <nil> false false false <nil> &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil> nil <nil> [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:10,UpdatedReplicas:10,AvailableReplicas:0,UnavailableReplicas:10,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2023-03-07 21:22:28 +0000 UTC,LastTransitionTime:2023-03-07 21:22:28 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "webserver-deployment-67bd4bf6dc" is progressing.,LastUpdateTime:2023-03-07 21:26:52 +0000 
UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,},},ReadyReplicas:0,CollisionCount:nil,},} Mar 7 21:27:10.737: INFO: New ReplicaSet "webserver-deployment-67bd4bf6dc" of Deployment "webserver-deployment": &ReplicaSet{ObjectMeta:{webserver-deployment-67bd4bf6dc deployment-1930 2769d251-3159-4715-b4e7-0396e880a33b 4276 1 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[deployment.kubernetes.io/desired-replicas:10 deployment.kubernetes.io/max-replicas:13 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment webserver-deployment 8a525d50-e105-4958-bd9f-9bcec6c03ed3 0xc001decb57 0xc001decb58}] [] [{kube-controller-manager Update apps/v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8a525d50-e105-4958-bd9f-9bcec6c03ed3\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-03-07 21:26:52 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*10,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 67bd4bf6dc,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc001decbe8 <nil> ClusterFirst map[] <nil> false false false <nil> &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil> nil <nil> [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:10,FullyLabeledReplicas:10,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} Mar 7 21:27:10.741: INFO: Pod "webserver-deployment-67bd4bf6dc-6qg5t" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-6qg5t webserver-deployment-67bd4bf6dc- deployment-1930 260847ab-df66-48ac-aaff-0080d14eb590 4334 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001decf90 0xc001decf91}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:08 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.243\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4f3\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-nw6s2,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nw6s2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:
*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:50 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:50 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.243,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 2m40s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-6qg5t_deployment-1930(260847ab-df66-48ac-aaff-0080d14eb590),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:24:48 +0000 UTC,FinishedAt:2023-03-07 21:24:49 +0000 UTC,ContainerID:containerd://74173ce347b7bc9b424034bfe2264ec423eb0a16e7c0f0fefae674f18cfdf82c,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://74173ce347b7bc9b424034bfe2264ec423eb0a16e7c0f0fefae674f18cfdf82c,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.243,},PodIP{IP:2001:4860:4860::4f3,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} Mar 7 21:27:10.741: INFO: Pod "webserver-deployment-67bd4bf6dc-b72f7" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-b72f7 
webserver-deployment-67bd4bf6dc- deployment-1930 2d066e1e-9617-4400-b7ae-a440492fb2a6 4326 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001ded180 0xc001ded181}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.237\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4ed\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-6rdjb,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6rdjb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:
nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.237,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 2m40s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-b72f7_deployment-1930(2d066e1e-9617-4400-b7ae-a440492fb2a6),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:24:50 +0000 UTC,FinishedAt:2023-03-07 21:24:51 +0000 
UTC,ContainerID:containerd://74e8b7268bd49d8db91a2ed866903bc8007eb88a3a24a57390c7237fb7f2dae5,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://74e8b7268bd49d8db91a2ed866903bc8007eb88a3a24a57390c7237fb7f2dae5,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.237,},PodIP{IP:2001:4860:4860::4ed,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} Mar 7 21:27:10.741: INFO: Pod "webserver-deployment-67bd4bf6dc-cbd76" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-cbd76 webserver-deployment-67bd4bf6dc- deployment-1930 8d89d3e8-21cf-49e1-92b5-5a854213f184 4333 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001ded370 0xc001ded371}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:08 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.246\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4f6\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-l68kl,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l68kl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,Sc
hedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:50 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:50 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.246,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 2m40s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-cbd76_deployment-1930(8d89d3e8-21cf-49e1-92b5-5a854213f184),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:24:48 +0000 UTC,FinishedAt:2023-03-07 21:24:49 +0000 UTC,ContainerID:containerd://bf80e75910442edf083ac0e11b66ece1dd431f6c10bdb6d894c3dbae05faf4ca,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://bf80e75910442edf083ac0e11b66ece1dd431f6c10bdb6d894c3dbae05faf4ca,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.246,},PodIP{IP:2001:4860:4860::4f6,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} Mar 7 21:27:10.742: INFO: Pod "webserver-deployment-67bd4bf6dc-hp7w8" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-hp7w8 webserver-deployment-67bd4bf6dc- deployment-1930 c74023a5-c726-44d3-b80d-0744161cfa95 4320 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001ded570 0xc001ded571}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:05 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.236\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4ec\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-78m4p,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-78m4p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil
,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:26:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:26:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.236,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 5m0s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-hp7w8_deployment-1930(c74023a5-c726-44d3-b80d-0744161cfa95),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:26:50 +0000 UTC,FinishedAt:2023-03-07 21:26:51 +0000 UTC,ContainerID:containerd://d67441f02fed65b159b38cdff21250a2dd0b592d4b8b025cf2aa36b8b5b0c6d7,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://d67441f02fed65b159b38cdff21250a2dd0b592d4b8b025cf2aa36b8b5b0c6d7,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.236,},PodIP{IP:2001:4860:4860::4ec,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} Mar 7 21:27:10.742: INFO: Pod "webserver-deployment-67bd4bf6dc-jdkgs" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-jdkgs webserver-deployment-67bd4bf6dc- deployment-1930 962477f7-c298-4264-84e5-613aaa549fda 4328 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001ded770 0xc001ded771}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:07 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.240\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4f0\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-mkpcr,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mkpcr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:
*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.240,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 2m40s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-jdkgs_deployment-1930(962477f7-c298-4264-84e5-613aaa549fda),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:24:50 +0000 UTC,FinishedAt:2023-03-07 21:24:51 +0000 UTC,ContainerID:containerd://b01959f226aee614f639571ff70d5d1911b037d9c3e61fd7f39f1946442cf239,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://b01959f226aee614f639571ff70d5d1911b037d9c3e61fd7f39f1946442cf239,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.240,},PodIP{IP:2001:4860:4860::4f0,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} Mar 7 21:27:10.742: INFO: Pod "webserver-deployment-67bd4bf6dc-lpjns" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-lpjns 
webserver-deployment-67bd4bf6dc- deployment-1930 8ba7ec50-2dae-49aa-8e64-83d6e63edfaa 4324 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001ded960 0xc001ded961}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.241\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4f1\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-fkqzn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fkqzn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:
nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.241,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 2m40s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-lpjns_deployment-1930(8ba7ec50-2dae-49aa-8e64-83d6e63edfaa),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:24:50 +0000 UTC,FinishedAt:2023-03-07 21:24:51 +0000 
UTC,ContainerID:containerd://8ca93497edbd5a57682aea7bd70924bf08f9b4dbb49c30681f181f35521349d9,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://8ca93497edbd5a57682aea7bd70924bf08f9b4dbb49c30681f181f35521349d9,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.241,},PodIP{IP:2001:4860:4860::4f1,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} Mar 7 21:27:10.742: INFO: Pod "webserver-deployment-67bd4bf6dc-rbzhv" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-rbzhv webserver-deployment-67bd4bf6dc- deployment-1930 8dae681c-d2c8-4c2f-8109-9c786fbfbd30 4335 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001dedb60 0xc001dedb61}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:09 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.251\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4fb\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-m4lkk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m4lkk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,Sc
hedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.251,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 2m40s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-rbzhv_deployment-1930(8dae681c-d2c8-4c2f-8109-9c786fbfbd30),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:24:50 +0000 UTC,FinishedAt:2023-03-07 21:24:51 +0000 UTC,ContainerID:containerd://3138c933d727216877846a8542eba85a26aa9eccf59f5cbafa102ba0226feea2,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://3138c933d727216877846a8542eba85a26aa9eccf59f5cbafa102ba0226feea2,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.251,},PodIP{IP:2001:4860:4860::4fb,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} Mar 7 21:27:10.742: INFO: Pod "webserver-deployment-67bd4bf6dc-skkhm" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-skkhm webserver-deployment-67bd4bf6dc- deployment-1930 e2280a7d-2dd4-42eb-9123-e7ed6386300c 4329 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001dedd60 0xc001dedd61}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:08 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.244\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4f4\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-qwwlf,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qwwlf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil
,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:52 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.244,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 2m40s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-skkhm_deployment-1930(e2280a7d-2dd4-42eb-9123-e7ed6386300c),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:24:50 +0000 UTC,FinishedAt:2023-03-07 21:24:51 +0000 UTC,ContainerID:containerd://1212781ef6e8cc43dd75b3cef9aaf47603b5249d89063bab956f884279c72dbf,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://1212781ef6e8cc43dd75b3cef9aaf47603b5249d89063bab956f884279c72dbf,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.244,},PodIP{IP:2001:4860:4860::4f4,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} Mar 7 21:27:10.742: INFO: Pod "webserver-deployment-67bd4bf6dc-tt5cs" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-tt5cs webserver-deployment-67bd4bf6dc- deployment-1930 b000d049-e9b2-4cd3-b882-4b517ed8171c 4323 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001dedf50 0xc001dedf51}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.242\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4f2\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-sn2sq,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sn2sq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:
*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:50 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:24:50 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.242,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 2m40s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-tt5cs_deployment-1930(b000d049-e9b2-4cd3-b882-4b517ed8171c),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:24:48 +0000 UTC,FinishedAt:2023-03-07 21:24:49 +0000 UTC,ContainerID:containerd://00dbe79b33bdefea86af001cf9ec54f25adc188d3a98db990e2db207a68354ed,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://00dbe79b33bdefea86af001cf9ec54f25adc188d3a98db990e2db207a68354ed,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.242,},PodIP{IP:2001:4860:4860::4f2,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} Mar 7 21:27:10.742: INFO: Pod "webserver-deployment-67bd4bf6dc-v2f6d" is not available: &Pod{ObjectMeta:{webserver-deployment-67bd4bf6dc-v2f6d 
webserver-deployment-67bd4bf6dc- deployment-1930 a528ccd4-5a83-4f19-ba50-96a32c019c4e 4325 0 2023-03-07 21:21:58 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:67bd4bf6dc] map[] [{apps/v1 ReplicaSet webserver-deployment-67bd4bf6dc 2769d251-3159-4715-b4e7-0396e880a33b 0xc001ee6140 0xc001ee6141}] [] [{kube-controller-manager Update v1 2023-03-07 21:21:58 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2769d251-3159-4715-b4e7-0396e880a33b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-03-07 21:27:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.88.4.235\"}":{".":{},"f:ip":{}},"k:{\"ip\":\"2001:4860:4860::4eb\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-bnqj6,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bnqj6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:
nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:172.17.0.1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:25:00 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:25:00 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-03-07 21:21:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.17.0.1,PodIP:10.88.4.235,StartTime:2023-03-07 21:21:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:CrashLoopBackOff,Message:back-off 2m40s restarting failed container=httpd pod=webserver-deployment-67bd4bf6dc-v2f6d_deployment-1930(a528ccd4-5a83-4f19-ba50-96a32c019c4e),},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:&ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2023-03-07 21:24:58 +0000 UTC,FinishedAt:2023-03-07 21:24:59 +0000 
UTC,ContainerID:containerd://14b3304e8fe4b8334a653613a40d959fdf304193ebf051c673892b3a49282d9f,},},Ready:false,RestartCount:5,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:containerd://14b3304e8fe4b8334a653613a40d959fdf304193ebf051c673892b3a49282d9f,Started:*false,ResourcesAllocated:ResourceList{},Resources:nil,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.88.4.235,},PodIP{IP:2001:4860:4860::4eb,},},EphemeralContainerStatuses:[]ContainerStatus{},Resize:,},} < Exit [AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 @ 03/07/23 21:27:10.742 (11ms) > Enter [AfterEach] [sig-apps] Deployment - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:27:10.742 Mar 7 21:27:10.742: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-apps] Deployment - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:27:10.745 (3ms) > Enter [DeferCleanup (Each)] [sig-apps] Deployment - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:27:10.745 < Exit [DeferCleanup (Each)] [sig-apps] Deployment - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:27:10.745 (0s) > Enter [DeferCleanup (Each)] [sig-apps] Deployment - dump namespaces | framework.go:209 @ 03/07/23 21:27:10.745 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:27:10.746 STEP: Collecting events from namespace "deployment-1930". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:27:10.746 STEP: Found 73 events. - test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:27:10.75 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-deployment-67bd4bf6dc-6qg5t: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-6qg5t to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-deployment-67bd4bf6dc-b72f7: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-b72f7 to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-deployment-67bd4bf6dc-cbd76: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-cbd76 to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-deployment-67bd4bf6dc-hp7w8: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-hp7w8 to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-deployment-67bd4bf6dc-jdkgs: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-jdkgs to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-deployment-67bd4bf6dc-lpjns: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-lpjns to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-deployment-67bd4bf6dc-rbzhv: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-rbzhv to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-deployment-67bd4bf6dc-skkhm: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-skkhm to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for 
webserver-deployment-67bd4bf6dc-tt5cs: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-tt5cs to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-deployment-67bd4bf6dc-v2f6d: { } Scheduled: Successfully assigned deployment-1930/webserver-deployment-67bd4bf6dc-v2f6d to 172.17.0.1 Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment: {deployment-controller } ScalingReplicaSet: Scaled up replica set webserver-deployment-67bd4bf6dc to 10 Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: Created pod: webserver-deployment-67bd4bf6dc-lpjns Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: Created pod: webserver-deployment-67bd4bf6dc-tt5cs Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: Created pod: webserver-deployment-67bd4bf6dc-v2f6d Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: Created pod: webserver-deployment-67bd4bf6dc-jdkgs Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: Created pod: webserver-deployment-67bd4bf6dc-rbzhv Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: Created pod: webserver-deployment-67bd4bf6dc-hp7w8 Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: Created pod: webserver-deployment-67bd4bf6dc-b72f7 Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: (combined from similar events): Created pod: webserver-deployment-67bd4bf6dc-6qg5t Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: Created pod: webserver-deployment-67bd4bf6dc-skkhm Mar 7 21:27:10.750: INFO: At 2023-03-07 21:21:58 +0000 UTC - event for webserver-deployment-67bd4bf6dc: {replicaset-controller } SuccessfulCreate: Created pod: webserver-deployment-67bd4bf6dc-cbd76 Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-b72f7: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-b72f7: {kubelet 172.17.0.1} Created: Created container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-b72f7: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-cbd76: {kubelet 172.17.0.1} Created: Created container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-cbd76: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for 
webserver-deployment-67bd4bf6dc-cbd76: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-hp7w8: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-hp7w8: {kubelet 172.17.0.1} Created: Created container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-hp7w8: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-rbzhv: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-rbzhv: {kubelet 172.17.0.1} Created: Created container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-rbzhv: {kubelet 172.17.0.1} Failed: Error: failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to create new parent process: namespace path: lstat /proc/302570/ns/ipc: no such file or directory: unknown Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-tt5cs: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-tt5cs: {kubelet 172.17.0.1} Created: Created container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:01 +0000 UTC - event for webserver-deployment-67bd4bf6dc-tt5cs: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-6qg5t: {kubelet 172.17.0.1} Created: Created container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-6qg5t: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-6qg5t: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-jdkgs: {kubelet 172.17.0.1} Created: Created container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-jdkgs: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-jdkgs: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-lpjns: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-lpjns: {kubelet 172.17.0.1} Created: Created container 
httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-lpjns: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-rbzhv: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-tt5cs: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-v2f6d: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-v2f6d: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:02 +0000 UTC - event for webserver-deployment-67bd4bf6dc-v2f6d: {kubelet 172.17.0.1} Created: Created container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:03 +0000 UTC - event for webserver-deployment-67bd4bf6dc-rbzhv: {kubelet 172.17.0.1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container task "8b28f698c59c9326ad439713b7426400bf1af57d13e8fa83f78fc441f0722788": OCI runtime start failed: cannot start a container that has stopped: unknown Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:03 +0000 UTC - event for webserver-deployment-67bd4bf6dc-skkhm: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:03 +0000 UTC - event for webserver-deployment-67bd4bf6dc-skkhm: {kubelet 172.17.0.1} Created: Created container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:03 +0000 UTC - event for webserver-deployment-67bd4bf6dc-skkhm: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:04 +0000 UTC - event for webserver-deployment-67bd4bf6dc-6qg5t: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:04 +0000 UTC - event for webserver-deployment-67bd4bf6dc-b72f7: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:04 +0000 UTC - event for webserver-deployment-67bd4bf6dc-cbd76: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:04 +0000 UTC - event for webserver-deployment-67bd4bf6dc-hp7w8: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:04 +0000 UTC - event for webserver-deployment-67bd4bf6dc-jdkgs: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:04 +0000 UTC - event for webserver-deployment-67bd4bf6dc-lpjns: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:04 +0000 UTC - event for webserver-deployment-67bd4bf6dc-skkhm: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:04 +0000 UTC - event for webserver-deployment-67bd4bf6dc-v2f6d: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:06 +0000 UTC - event for webserver-deployment-67bd4bf6dc-rbzhv: {kubelet 172.17.0.1} Started: Started container httpd Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:08 +0000 UTC - event for webserver-deployment-67bd4bf6dc-tt5cs: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-tt5cs_deployment-1930(b000d049-e9b2-4cd3-b882-4b517ed8171c) Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:10 +0000 UTC - event for webserver-deployment-67bd4bf6dc-6qg5t: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-6qg5t_deployment-1930(260847ab-df66-48ac-aaff-0080d14eb590) Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:10 +0000 UTC - event for webserver-deployment-67bd4bf6dc-b72f7: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-b72f7_deployment-1930(2d066e1e-9617-4400-b7ae-a440492fb2a6) Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:10 +0000 UTC - event for webserver-deployment-67bd4bf6dc-cbd76: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-cbd76_deployment-1930(8d89d3e8-21cf-49e1-92b5-5a854213f184) Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:10 +0000 UTC - event for webserver-deployment-67bd4bf6dc-hp7w8: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-hp7w8_deployment-1930(c74023a5-c726-44d3-b80d-0744161cfa95) Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:10 +0000 UTC - event for webserver-deployment-67bd4bf6dc-jdkgs: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-jdkgs_deployment-1930(962477f7-c298-4264-84e5-613aaa549fda) Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:10 +0000 UTC - event for webserver-deployment-67bd4bf6dc-lpjns: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-lpjns_deployment-1930(8ba7ec50-2dae-49aa-8e64-83d6e63edfaa) Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:10 +0000 UTC - event for webserver-deployment-67bd4bf6dc-rbzhv: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-rbzhv_deployment-1930(8dae681c-d2c8-4c2f-8109-9c786fbfbd30) Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:10 +0000 UTC - event for webserver-deployment-67bd4bf6dc-skkhm: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-skkhm_deployment-1930(e2280a7d-2dd4-42eb-9123-e7ed6386300c) Mar 7 21:27:10.750: INFO: At 2023-03-07 21:22:10 +0000 UTC - event for webserver-deployment-67bd4bf6dc-v2f6d: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container httpd in pod webserver-deployment-67bd4bf6dc-v2f6d_deployment-1930(a528ccd4-5a83-4f19-ba50-96a32c019c4e) Mar 7 21:27:10.754: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-6qg5t 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:50 +0000 UTC ContainersNotReady containers with unready status: [httpd]} 
{ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:50 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-b72f7 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-cbd76 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:50 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:50 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-hp7w8 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:26:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:26:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-jdkgs 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-lpjns 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-rbzhv 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-skkhm 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 
+0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:52 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-tt5cs 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:50 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:24:50 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: webserver-deployment-67bd4bf6dc-v2f6d 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:25:00 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:25:00 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:21:58 +0000 UTC }] Mar 7 21:27:10.754: INFO: Mar 7 21:27:10.793: INFO: Logging node info for node 172.17.0.1 Mar 7 21:27:10.795: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 3308 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:22:40 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:22:40 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:22:40 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:22:40 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:22:40 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:27:10.796: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:27:10.798: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:27:10.805: INFO: webserver-deployment-67bd4bf6dc-v2f6d started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.805: INFO: webserver-deployment-67bd4bf6dc-hp7w8 started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.805: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container coredns ready: false, restart count 10 Mar 7 21:27:10.805: INFO: webserver-deployment-67bd4bf6dc-tt5cs started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.805: INFO: 
webserver-deployment-67bd4bf6dc-skkhm started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.805: INFO: webserver-deployment-67bd4bf6dc-b72f7 started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.805: INFO: webserver-deployment-67bd4bf6dc-rbzhv started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.805: INFO: webserver-deployment-67bd4bf6dc-jdkgs started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.805: INFO: webserver-deployment-67bd4bf6dc-cbd76 started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.805: INFO: webserver-deployment-67bd4bf6dc-lpjns started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.805: INFO: webserver-deployment-67bd4bf6dc-6qg5t started at 2023-03-07 21:21:58 +0000 UTC (0+1 container statuses recorded) Mar 7 21:27:10.805: INFO: Container httpd ready: false, restart count 5 Mar 7 21:27:10.839: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:27:10.839 (94ms) < Exit [DeferCleanup (Each)] [sig-apps] Deployment - dump namespaces | framework.go:209 @ 03/07/23 21:27:10.839 (94ms) > Enter [DeferCleanup (Each)] [sig-apps] Deployment - tear down framework | framework.go:206 @ 03/07/23 21:27:10.839 STEP: Destroying namespace "deployment-1930" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 21:27:10.84 < Exit [DeferCleanup (Each)] [sig-apps] Deployment - tear down framework | framework.go:206 @ 03/07/23 21:27:10.845 (6ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:27:10.846 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:27:10.846 (0s)
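For triage context on the dump above: every webserver pod reports Ready=False with ContainersNotReady and a climbing restart count, while the node itself stays Ready=True, which points at the containers rather than the node. The same pod and node conditions the framework prints can be pulled directly with client-go. A minimal sketch, assuming the kubeconfig path this run used and the namespace/node names from this log (the program is illustrative, not part of the suite):

// Sketch: reproduce the suite's "pod ready vs. node ready" triage with client-go.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Same kubeconfig the e2e run used, per the log above.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	c := kubernetes.NewForConfigOrDie(cfg)
	ctx := context.Background()

	// Pods in the test namespace: print phase and the Ready condition,
	// mirroring the "Ready False ... ContainersNotReady" lines above.
	pods, err := c.CoreV1().Pods("deployment-1930").List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		for _, cond := range p.Status.Conditions {
			if cond.Type == corev1.PodReady {
				fmt.Printf("%s phase=%s ready=%s reason=%s\n",
					p.Name, p.Status.Phase, cond.Status, cond.Reason)
			}
		}
	}

	// The node's own conditions, mirroring the NodeCondition dump above.
	node, err := c.CoreV1().Nodes().Get(ctx, "172.17.0.1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, cond := range node.Status.Conditions {
		fmt.Printf("node %s: %s=%s (%s)\n", node.Name, cond.Type, cond.Status, cond.Reason)
	}
}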
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sStatefulSet\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sshould\sperform\srolling\supdates\sand\sroll\sbacks\sof\stemplate\smodifications\s\[Conformance\]$'
[FAILED] Failed waiting for pods to enter running: timed out waiting for the condition In [It] at: test/e2e/framework/statefulset/wait.go:58 @ 03/07/23 22:09:01.83 (from junit_01.xml)
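The progress dumps in the log below all show the same blocked call chain: rollbackTest -> e2estatefulset.WaitForRunningAndReady -> statefulset.WaitForRunning -> wait.PollImmediateWithContext, listing the StatefulSet's pods every 10s for the full 10-minute timeout. A simplified sketch of that wait pattern, assuming a hypothetical waitForRunningPods helper (an illustrative reduction; the real WaitForRunning also requires readiness, and the 10s/10m values are read off the log's poll cadence and the spec's 10m0.017s runtime):

// Sketch of the polling loop visible in the goroutine dumps below.
package e2eutil

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForRunningPods(ctx context.Context, c kubernetes.Interface, ns, selector string, want int32) error {
	// PollImmediateWithContext runs the condition once right away, then every
	// interval until it returns true, errors, or the timeout elapses -- the
	// same vendored call visible at wait.go:534 in the dumps.
	return wait.PollImmediateWithContext(ctx, 10*time.Second, 10*time.Minute,
		func(ctx context.Context) (bool, error) {
			pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
			if err != nil {
				return false, err
			}
			var running int32
			for _, p := range pods.Items {
				if p.Status.Phase == corev1.PodRunning {
					running++
				}
			}
			fmt.Printf("Found %d stateful pods, waiting for %d\n", running, want)
			return running == want, nil
		})
}

When the condition never returns true within the timeout, PollImmediateWithContext returns wait.ErrWaitTimeout, whose message is exactly the "timed out waiting for the condition" seen in the FAILED line above.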
> Enter [BeforeEach] [sig-apps] StatefulSet - set up framework | framework.go:191 @ 03/07/23 21:59:01.781 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:59:01.781 Mar 7 21:59:01.781: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename statefulset - test/e2e/framework/framework.go:250 @ 03/07/23 21:59:01.782 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:59:01.795 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:59:01.801 < Exit [BeforeEach] [sig-apps] StatefulSet - set up framework | framework.go:191 @ 03/07/23 21:59:01.806 (25ms) > Enter [BeforeEach] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:59:01.806 < Exit [BeforeEach] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:59:01.806 (0s) > Enter [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:100 @ 03/07/23 21:59:01.806 < Exit [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:100 @ 03/07/23 21:59:01.806 (0s) > Enter [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:115 @ 03/07/23 21:59:01.806 STEP: Creating service test in namespace statefulset-8368 - test/e2e/apps/statefulset.go:120 @ 03/07/23 21:59:01.806 < Exit [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:115 @ 03/07/23 21:59:01.813 (7ms) > Enter [It] should perform rolling updates and roll backs of template modifications [Conformance] - test/e2e/apps/statefulset.go:316 @ 03/07/23 21:59:01.813 STEP: Creating a new StatefulSet - test/e2e/apps/statefulset.go:317 @ 03/07/23 21:59:01.813 Mar 7 21:59:01.822: INFO: Found 0 stateful pods, waiting for 3 Mar 7 21:59:11.826: INFO: Found 2 stateful pods, waiting for 3 Mar 7 21:59:21.826: INFO: Found 2 stateful pods, waiting for 3 Mar 7 21:59:31.826: INFO: Found 2 stateful pods, waiting for 3 Mar 7 21:59:41.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 21:59:51.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:00:01.826: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:00:11.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:00:21.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:00:31.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:00:41.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:00:51.826: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:01:01.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:01:11.828: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:01:21.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:01:31.828: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:01:41.828: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:01:51.828: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:02:01.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:02:11.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:02:21.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:02:31.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:02:41.829: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:02:51.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:03:01.826: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:03:11.826: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:03:21.826: INFO: Found 2 stateful pods, waiting for 3 Mar 7 
22:03:31.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:03:41.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:03:51.827: INFO: Found 2 stateful pods, waiting for 3 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance] (Spec Runtime: 5m0.033s) test/e2e/apps/statefulset.go:316 In [It] (Node Runtime: 5m0.001s) test/e2e/apps/statefulset.go:316 At [By Step] Creating a new StatefulSet (Step Runtime: 5m0.001s) test/e2e/apps/statefulset.go:317 Spec Goroutine goroutine 6991 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc00448ca80}, 0xc001b4d470, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:647 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc00448ca80}, 0x0?, 0xc00448ca80?, 0xc0007cf900?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc00448ca80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f07c801c938?, 0xc00448ca80?}, {0x7f62698?, 0xc0013ac820?}, 0x0?, 0x0?, 0x0?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.rollbackTest({0x7f07c801c938, 0xc00448ca80}, {0x7f62698?, 0xc0013ac820}, {0xc003051af0, 0x10}, 0xc0007cf400) test/e2e/apps/statefulset.go:1691 | ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) | framework.ExpectNoError(err) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | ss = waitForStatus(ctx, c, ss) | currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.7({0x7f07c801c938, 0xc00448ca80}) test/e2e/apps/statefulset.go:319 | ginkgo.By("Creating a new StatefulSet") | ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) > rollbackTest(ctx, c, ns, ss) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc00448ca80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841
[The "Found 2 stateful pods, waiting for 3" INFO line continued at 10s intervals (the final polls appear below), and an identical goroutine 6991 dump was re-emitted at every subsequent 20s progress poll through Spec Runtime 9m40.056s; the 7m0.043s and 7m20.045s dumps caught the same goroutine inside the pods List request instead (net/http round trip -> client-go rest.(*Request).Do -> typed/core/v1.(*pods).List -> statefulset.GetPodList, test/e2e/framework/statefulset/rest.go:68). Repeated dumps omitted.]
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.rollbackTest({0x7f07c801c938, 0xc00448ca80}, {0x7f62698?, 0xc0013ac820}, {0xc003051af0, 0x10}, 0xc0007cf400) test/e2e/apps/statefulset.go:1691 | ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) | framework.ExpectNoError(err) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | ss = waitForStatus(ctx, c, ss) | currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.7({0x7f07c801c938, 0xc00448ca80}) test/e2e/apps/statefulset.go:319 | ginkgo.By("Creating a new StatefulSet") | ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) > rollbackTest(ctx, c, ns, ss) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc00448ca80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 22:08:51.827: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:09:01.826: INFO: Found 2 stateful pods, waiting for 3 Mar 7 22:09:01.830: INFO: Found 2 stateful pods, waiting for 3 [FAILED] Failed waiting for pods to enter running: timed out waiting for the condition In [It] at: test/e2e/framework/statefulset/wait.go:58 @ 03/07/23 22:09:01.83 < Exit [It] should perform rolling updates and roll backs of template modifications [Conformance] - test/e2e/apps/statefulset.go:316 @ 03/07/23 22:09:01.83 (10m0.017s) > Enter [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:126 @ 03/07/23 22:09:01.831 Mar 7 22:09:01.834: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=statefulset-8368 describe po ss2-0' Mar 7 22:09:01.947: INFO: stderr: "" Mar 7 22:09:01.947: INFO: stdout: "Name: ss2-0\nNamespace: statefulset-8368\nPriority: 0\nService Account: default\nNode: 172.17.0.1/172.17.0.1\nStart Time: Tue, 07 Mar 2023 21:59:01 +0000\nLabels: baz=blah\n controller-revision-hash=ss2-7b6c9599d5\n foo=bar\n statefulset.kubernetes.io/pod-name=ss2-0\nAnnotations: <none>\nStatus: Running\nIP: 10.88.10.181\nIPs:\n IP: 10.88.10.181\n IP: 2001:4860:4860::ab5\nControlled By: StatefulSet/ss2\nContainers:\n webserver:\n Container ID: containerd://4563d2f89d22c4f70e8844217a9ffeb07ea034003d1d6e9a631757e974e0d96e\n Image: registry.k8s.io/e2e-test-images/httpd:2.4.38-4\n Image ID: registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22\n Port: <none>\n Host Port: <none>\n State: Waiting\n Reason: CrashLoopBackOff\n Last State: Terminated\n Reason: Error\n Exit Code: 137\n Started: Tue, 07 Mar 2023 22:04:34 +0000\n Finished: Tue, 07 Mar 2023 22:04:35 +0000\n Ready: False\n Restart Count: 6\n Readiness: http-get http://:80/index.html delay=0s timeout=1s period=1s #success=1 #failure=1\n Environment: <none>\n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-h74gp (ro)\nConditions:\n Type Status\n Initialized True \n Ready False \n ContainersReady False \n PodScheduled True \nVolumes:\n kube-api-access-h74gp:\n Type: Projected (a volume that contains injected data from multiple 
sources)\n TokenExpirationSeconds: 3607\n ConfigMapName: kube-root-ca.crt\n ConfigMapOptional: <nil>\n DownwardAPI: true\nQoS Class: BestEffort\nNode-Selectors: <none>\nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 10m default-scheduler Successfully assigned statefulset-8368/ss2-0 to 172.17.0.1\n Normal Pulled 9m41s (x3 over 9m58s) kubelet Container image \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\" already present on machine\n Normal Created 9m41s (x3 over 9m58s) kubelet Created container webserver\n Normal Started 9m41s (x3 over 9m58s) kubelet Started container webserver\n Warning Unhealthy 9m39s kubelet Readiness probe failed: Get \"http://10.88.9.7:80/index.html\": dial tcp 10.88.9.7:80: connect: connection refused\n Warning BackOff 9m32s (x9 over 9m50s) kubelet Back-off restarting failed container webserver in pod ss2-0_statefulset-8368(5c7fd3b2-fa0c-48ed-822d-80e645f0b652)\n Normal SandboxChanged 4m57s (x73 over 9m55s) kubelet Pod sandbox changed, it will be killed and re-created.\n" Mar 7 22:09:01.947: INFO: Output of kubectl describe ss2-0: Name: ss2-0 Namespace: statefulset-8368 Priority: 0 Service Account: default Node: 172.17.0.1/172.17.0.1 Start Time: Tue, 07 Mar 2023 21:59:01 +0000 Labels: baz=blah controller-revision-hash=ss2-7b6c9599d5 foo=bar statefulset.kubernetes.io/pod-name=ss2-0 Annotations: <none> Status: Running IP: 10.88.10.181 IPs: IP: 10.88.10.181 IP: 2001:4860:4860::ab5 Controlled By: StatefulSet/ss2 Containers: webserver: Container ID: containerd://4563d2f89d22c4f70e8844217a9ffeb07ea034003d1d6e9a631757e974e0d96e Image: registry.k8s.io/e2e-test-images/httpd:2.4.38-4 Image ID: registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 Port: <none> Host Port: <none> State: Waiting Reason: CrashLoopBackOff Last State: Terminated Reason: Error Exit Code: 137 Started: Tue, 07 Mar 2023 22:04:34 +0000 Finished: Tue, 07 Mar 2023 22:04:35 +0000 Ready: False Restart Count: 6 Readiness: http-get http://:80/index.html delay=0s timeout=1s period=1s #success=1 #failure=1 Environment: <none> Mounts: /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-h74gp (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: kube-api-access-h74gp: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: <nil> DownwardAPI: true QoS Class: BestEffort Node-Selectors: <none> Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 10m default-scheduler Successfully assigned statefulset-8368/ss2-0 to 172.17.0.1 Normal Pulled 9m41s (x3 over 9m58s) kubelet Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Normal Created 9m41s (x3 over 9m58s) kubelet Created container webserver Normal Started 9m41s (x3 over 9m58s) kubelet Started container webserver Warning Unhealthy 9m39s kubelet Readiness probe failed: Get "http://10.88.9.7:80/index.html": dial tcp 10.88.9.7:80: connect: connection refused Warning BackOff 9m32s (x9 over 9m50s) kubelet Back-off restarting failed container webserver in pod 
Mar 7 22:09:01.947: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=statefulset-8368 logs ss2-0 --tail=100'
Mar 7 22:09:02.037: INFO: stderr: ""
Mar 7 22:09:02.037: INFO: stdout: "[Tue Mar 07 22:04:34.288145 2023] [mpm_event:notice] [pid 1:tid 140513310694248] AH00489: Apache/2.4.38 (Unix) configured -- resuming normal operations\n[Tue Mar 07 22:04:34.288223 2023] [core:notice] [pid 1:tid 140513310694248] AH00094: Command line: 'httpd -D FOREGROUND'\n10.88.0.1 - - [07/Mar/2023:22:04:34 +0000] \"GET /index.html HTTP/1.1\" 200 45\n10.88.0.1 - - [07/Mar/2023:22:04:35 +0000] \"GET /index.html HTTP/1.1\" 200 45\n"
Mar 7 22:09:02.037: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=statefulset-8368 describe po ss2-1'
Mar 7 22:09:02.122: INFO: stderr: ""
Mar 7 22:09:02.122: INFO: stdout: "Name: ss2-1\nNamespace: statefulset-8368\nPriority: 0\nService Account: default\nNode: 172.17.0.1/172.17.0.1\nStart Time: Tue, 07 Mar 2023 21:59:04 +0000\nLabels: baz=blah\n controller-revision-hash=ss2-7b6c9599d5\n foo=bar\n statefulset.kubernetes.io/pod-name=ss2-1\nAnnotations: <none>\nStatus: Running\nIP: 10.88.10.183\nIPs:\n IP: 10.88.10.183\n IP: 2001:4860:4860::ab7\nControlled By: StatefulSet/ss2\nContainers:\n webserver:\n Container ID: containerd://09abfaf673fac5c017a2a1735a8beabfeff065eb80a08bf7a5a0857195bef6d8\n Image: registry.k8s.io/e2e-test-images/httpd:2.4.38-4\n Image ID: registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22\n Port: <none>\n Host Port: <none>\n State: Waiting\n Reason: CrashLoopBackOff\n Last State: Terminated\n Reason: Error\n Exit Code: 137\n Started: Tue, 07 Mar 2023 22:04:38 +0000\n Finished: Tue, 07 Mar 2023 22:04:39 +0000\n Ready: False\n Restart Count: 6\n Readiness: http-get http://:80/index.html delay=0s timeout=1s period=1s #success=1 #failure=1\n Environment: <none>\n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-s6wgv (ro)\nConditions:\n Type Status\n Initialized True \n Ready False \n ContainersReady False \n PodScheduled True \nVolumes:\n kube-api-access-s6wgv:\n Type: Projected (a volume that contains injected data from multiple sources)\n TokenExpirationSeconds: 3607\n ConfigMapName: kube-root-ca.crt\n ConfigMapOptional: <nil>\n DownwardAPI: true\nQoS Class: BestEffort\nNode-Selectors: <none>\nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 9m58s default-scheduler Successfully assigned statefulset-8368/ss2-1 to 172.17.0.1\n Normal Pulled 9m40s (x3 over 9m56s) kubelet Container image \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\" already present on machine\n Normal Created 9m40s (x3 over 9m56s) kubelet Created container webserver\n Normal Started 9m40s (x3 over 9m56s) kubelet Started container webserver\n Normal SandboxChanged 9m30s (x7 over 9m54s) kubelet Pod sandbox changed, it will be killed and re-created.\n Warning BackOff 4m54s (x142 over 9m48s) kubelet Back-off restarting failed container webserver in pod ss2-1_statefulset-8368(0725b581-0c94-4f87-a9ac-808482103494)\n"
Mar 7 22:09:02.122: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=statefulset-8368 logs ss2-1 --tail=100'
Mar 7 22:09:02.211: INFO: stderr: ""
Mar 7 22:09:02.211: INFO: stdout: "[Tue Mar 07 22:04:38.188577 2023] [mpm_event:notice] [pid 1:tid 140698573437800] AH00489: Apache/2.4.38 (Unix) configured -- resuming normal operations\n[Tue Mar 07 22:04:38.188644 2023] [core:notice] [pid 1:tid 140698573437800] AH00094: Command line: 'httpd -D FOREGROUND'\n10.88.0.1 - - [07/Mar/2023:22:04:38 +0000] \"GET /index.html HTTP/1.1\" 200 45\n10.88.0.1 - - [07/Mar/2023:22:04:39 +0000] \"GET /index.html HTTP/1.1\" 200 45\n"
Mar 7 22:09:02.211: INFO: Deleting all statefulset in ns statefulset-8368
Mar 7 22:09:02.215: INFO: Scaling statefulset ss2 to 0
Mar 7 22:09:42.234: INFO: Waiting for statefulset status.replicas updated to 0
Mar 7 22:09:42.236: INFO: Deleting statefulset ss2
< Exit [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:126 @ 03/07/23 22:09:42.247 (40.417s)
> Enter [AfterEach] [sig-apps] StatefulSet - test/e2e/framework/node/init/init.go:33 @ 03/07/23 22:09:42.247
Mar 7 22:09:42.247: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
< Exit [AfterEach] [sig-apps] StatefulSet - test/e2e/framework/node/init/init.go:33 @ 03/07/23 22:09:42.255 (8ms)
> Enter [DeferCleanup (Each)] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 22:09:42.255
< Exit [DeferCleanup (Each)] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 22:09:42.255 (0s)
> Enter [DeferCleanup (Each)] [sig-apps] StatefulSet - dump namespaces | framework.go:209 @ 03/07/23 22:09:42.255
STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 22:09:42.255
STEP: Collecting events from namespace "statefulset-8368". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 22:09:42.256
STEP: Found 17 events. - test/e2e/framework/debug/dump.go:46 @ 03/07/23 22:09:42.259
Mar 7 22:09:42.259: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for ss2-0: { } Scheduled: Successfully assigned statefulset-8368/ss2-0 to 172.17.0.1
Mar 7 22:09:42.259: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for ss2-1: { } Scheduled: Successfully assigned statefulset-8368/ss2-1 to 172.17.0.1
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:01 +0000 UTC - event for ss2: {statefulset-controller } SuccessfulCreate: create Pod ss2-0 in StatefulSet ss2 successful
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:03 +0000 UTC - event for ss2-0: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:03 +0000 UTC - event for ss2-0: {kubelet 172.17.0.1} Created: Created container webserver
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:03 +0000 UTC - event for ss2-0: {kubelet 172.17.0.1} Started: Started container webserver
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:04 +0000 UTC - event for ss2: {statefulset-controller } SuccessfulCreate: create Pod ss2-1 in StatefulSet ss2 successful
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:06 +0000 UTC - event for ss2-0: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:06 +0000 UTC - event for ss2-1: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:06 +0000 UTC - event for ss2-1: {kubelet 172.17.0.1} Created: Created container webserver
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:06 +0000 UTC - event for ss2-1: {kubelet 172.17.0.1} Started: Started container webserver
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:08 +0000 UTC - event for ss2-1: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:11 +0000 UTC - event for ss2-0: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container webserver in pod ss2-0_statefulset-8368(5c7fd3b2-fa0c-48ed-822d-80e645f0b652)
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:14 +0000 UTC - event for ss2-1: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container webserver in pod ss2-1_statefulset-8368(0725b581-0c94-4f87-a9ac-808482103494)
Mar 7 22:09:42.259: INFO: At 2023-03-07 21:59:22 +0000 UTC - event for ss2-0: {kubelet 172.17.0.1} Unhealthy: Readiness probe failed: Get "http://10.88.9.7:80/index.html": dial tcp 10.88.9.7:80: connect: connection refused
Mar 7 22:09:42.259: INFO: At 2023-03-07 22:09:36 +0000 UTC - event for ss2: {statefulset-controller } SuccessfulDelete: delete Pod ss2-1 in StatefulSet ss2 successful
Mar 7 22:09:42.259: INFO: At 2023-03-07 22:09:39 +0000 UTC - event for ss2: {statefulset-controller } SuccessfulDelete: delete Pod ss2-0 in StatefulSet ss2 successful
Mar 7 22:09:42.262: INFO: POD NODE PHASE GRACE CONDITIONS
Mar 7 22:09:42.262: INFO:
Mar 7 22:09:42.265: INFO: Logging node info for node 172.17.0.1
Mar 7 22:09:42.267: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 10125 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 22:09:25 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 22:09:25 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}
Mar 7 22:09:42.267: INFO: Logging kubelet events for node 172.17.0.1
Mar 7 22:09:42.271: INFO: Logging pods the kubelet thinks is on node 172.17.0.1
Mar 7 22:09:42.276: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded)
Mar 7 22:09:42.276: INFO: Container coredns ready: false, restart count 18
Mar 7 22:09:42.314: INFO: Latency metrics for node 172.17.0.1
END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 22:09:42.314 (58ms)
< Exit [DeferCleanup (Each)] [sig-apps] StatefulSet - dump namespaces | framework.go:209 @ 03/07/23 22:09:42.314 (58ms)
> Enter [DeferCleanup (Each)] [sig-apps] StatefulSet - tear down framework | framework.go:206 @ 03/07/23 22:09:42.314
STEP: Destroying namespace "statefulset-8368" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 22:09:42.314
< Exit [DeferCleanup (Each)] [sig-apps] StatefulSet - tear down framework | framework.go:206 @ 03/07/23 22:09:42.319 (6ms)
> Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 22:09:42.32
< Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 22:09:42.32 (0s)
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\sclient\sUpdate\sDemo\sshould\sscale\sa\sreplication\scontroller\s\s\[Conformance\]$'
[FAILED] Timed out after 300 seconds waiting for name=update-demo pods to reach valid state
In [It] at: test/e2e/kubectl/kubectl.go:2350 @ 03/07/23 21:58:49.439
from junit_01.xml
> Enter [BeforeEach] [sig-cli] Kubectl client - set up framework | framework.go:191 @ 03/07/23 21:53:48.47
STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:53:48.47
Mar 7 21:53:48.470: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename kubectl - test/e2e/framework/framework.go:250 @ 03/07/23 21:53:48.471
STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:53:48.486
STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:53:48.49
< Exit [BeforeEach] [sig-cli] Kubectl client - set up framework | framework.go:191 @ 03/07/23 21:53:48.494 (24ms)
> Enter [BeforeEach] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:53:48.494
< Exit [BeforeEach] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:53:48.494 (0s)
> Enter [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:273 @ 03/07/23 21:53:48.494
< Exit [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:273 @ 03/07/23 21:53:48.494 (0s)
> Enter [BeforeEach] Update Demo - test/e2e/kubectl/kubectl.go:325 @ 03/07/23 21:53:48.494
< Exit [BeforeEach] Update Demo - test/e2e/kubectl/kubectl.go:325 @ 03/07/23 21:53:48.494 (0s)
> Enter [It] should scale a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:351 @ 03/07/23 21:53:48.494
STEP: creating a replication controller - test/e2e/kubectl/kubectl.go:354 @ 03/07/23 21:53:48.494
Mar 7 21:53:48.494: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 create -f -'
Mar 7 21:53:49.115: INFO: stderr: ""
Mar 7 21:53:49.115: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
STEP: waiting for all containers in name=update-demo pods to come up. - test/e2e/kubectl/kubectl.go:2310 @ 03/07/23 21:53:49.115
Mar 7 21:53:49.115: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
Mar 7 21:53:49.209: INFO: stderr: ""
Mar 7 21:53:49.209: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n "
Mar 7 21:53:49.209: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
Mar 7 21:53:49.283: INFO: stderr: ""
Mar 7 21:53:49.283: INFO: stdout: ""
Mar 7 21:53:49.283: INFO: update-demo-nautilus-6jlhk is created but not running
[... the same pair of kubectl polls repeats every 5 seconds from 21:53:54 through 21:57:47, each cycle ending "update-demo-nautilus-6jlhk is created but not running" ...]
Mar 7 21:57:52.280: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
Mar 7 21:57:52.363: INFO: stderr: ""
Mar 7 21:57:52.363: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n "
Mar 7 21:57:52.363: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
Mar 7 21:57:52.444: INFO: stderr: ""
Mar 7 21:57:52.444: INFO: stdout: ""
Mar 7 21:57:52.444: INFO: update-demo-nautilus-6jlhk is created but not running
Mar 7 21:57:57.445: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
Mar 7 21:57:57.528: INFO: stderr: ""
Mar 7 21:57:57.528: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n "
Mar 7 21:57:57.528: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:57:57.614: INFO: stderr: "" Mar 7 21:57:57.614: INFO: stdout: "" Mar 7 21:57:57.614: INFO: update-demo-nautilus-6jlhk is created but not running Mar 7 21:58:02.615: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 7 21:58:02.704: INFO: stderr: "" Mar 7 21:58:02.704: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n " Mar 7 21:58:02.704: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:58:02.794: INFO: stderr: "" Mar 7 21:58:02.794: INFO: stdout: "" Mar 7 21:58:02.794: INFO: update-demo-nautilus-6jlhk is created but not running Mar 7 21:58:07.795: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 7 21:58:07.876: INFO: stderr: "" Mar 7 21:58:07.876: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n " Mar 7 21:58:07.876: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:58:07.956: INFO: stderr: "" Mar 7 21:58:07.956: INFO: stdout: "" Mar 7 21:58:07.956: INFO: update-demo-nautilus-6jlhk is created but not running Mar 7 21:58:12.957: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 7 21:58:13.067: INFO: stderr: "" Mar 7 21:58:13.067: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n " Mar 7 21:58:13.067: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:58:13.154: INFO: stderr: "" Mar 7 21:58:13.154: INFO: stdout: "" Mar 7 21:58:13.154: INFO: update-demo-nautilus-6jlhk is created but not running Mar 7 21:58:18.155: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 7 21:58:18.259: INFO: stderr: "" Mar 7 21:58:18.259: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n " Mar 7 21:58:18.260: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:58:18.346: INFO: stderr: "" Mar 7 21:58:18.346: INFO: stdout: "" Mar 7 21:58:18.346: INFO: update-demo-nautilus-6jlhk is created but not running Mar 7 21:58:23.346: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 7 21:58:23.431: INFO: stderr: "" Mar 7 21:58:23.431: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n " Mar 7 21:58:23.431: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:58:23.514: INFO: stderr: "" Mar 7 21:58:23.514: INFO: stdout: "" Mar 7 21:58:23.514: INFO: update-demo-nautilus-6jlhk is created but not running Mar 7 21:58:28.515: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 7 21:58:28.598: INFO: stderr: "" Mar 7 21:58:28.598: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n " Mar 7 21:58:28.598: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:58:28.678: INFO: stderr: "" Mar 7 21:58:28.678: INFO: stdout: "" Mar 7 21:58:28.678: INFO: update-demo-nautilus-6jlhk is created but not running Mar 7 21:58:33.678: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 7 21:58:33.759: INFO: stderr: "" Mar 7 21:58:33.759: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n " Mar 7 21:58:33.759: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:58:33.837: INFO: stderr: "" Mar 7 21:58:33.837: INFO: stdout: "" Mar 7 21:58:33.837: INFO: update-demo-nautilus-6jlhk is created but not running Mar 7 21:58:38.838: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 7 21:58:38.920: INFO: stderr: "" Mar 7 21:58:38.920: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n " Mar 7 21:58:38.920: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:58:39.005: INFO: stderr: "" Mar 7 21:58:39.005: INFO: stdout: "" Mar 7 21:58:39.005: INFO: update-demo-nautilus-6jlhk is created but not running Mar 7 21:58:44.006: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 7 21:58:44.099: INFO: stderr: "" Mar 7 21:58:44.099: INFO: stdout: "update-demo-nautilus-6jlhk update-demo-nautilus-tlg8n " Mar 7 21:58:44.099: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods update-demo-nautilus-6jlhk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 7 21:58:44.184: INFO: stderr: "" Mar 7 21:58:44.184: INFO: stdout: "" Mar 7 21:58:44.184: INFO: update-demo-nautilus-6jlhk is created but not running Automatically polling progress: [sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance] (Spec Runtime: 5m0.025s) test/e2e/kubectl/kubectl.go:351 In [It] (Node Runtime: 5m0s) test/e2e/kubectl/kubectl.go:351 At [By Step] waiting for all containers in name=update-demo pods to come up. 
(Step Runtime: 4m59.38s) test/e2e/kubectl/kubectl.go:2310
Spec Goroutine
goroutine 6540 [sleep]
time.Sleep(0x12a05f200)
  /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/runtime/time.go:195
> k8s.io/kubernetes/test/e2e/kubectl.validateController({0x7f07c801c938, 0xc00390ce80}, {0x7f62698, 0xc003917ba0}, {0xc000b0b230?, 0x2605af7?}, 0x2, {0x7562a18, 0xb}, {0x757a86a, ...}, ...)
  test/e2e/kubectl/kubectl.go:2312
  | ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
  | waitLoop:
  > for start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
  | getPodsOutput := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
  | pods := strings.Fields(getPodsOutput)
> k8s.io/kubernetes/test/e2e/kubectl.glob..func1.6.3({0x7f07c801c938, 0xc00390ce80})
  test/e2e/kubectl/kubectl.go:356
  | ginkgo.By("creating a replication controller")
  | e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
  > validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
  | ginkgo.By("scaling down the replication controller")
  | debugDiscovery()
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc00390ce80})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841
STEP: using delete to clean up resources - test/e2e/kubectl/kubectl.go:197 @ 03/07/23 21:58:49.185
Mar 7 21:58:49.185: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 delete --grace-period=0 --force -f -'
Mar 7 21:58:49.272: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated.
The resource may continue to run on the cluster indefinitely.\n" Mar 7 21:58:49.272: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" Mar 7 21:58:49.272: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get rc,svc -l name=update-demo --no-headers' Mar 7 21:58:49.359: INFO: stderr: "No resources found in kubectl-4924 namespace.\n" Mar 7 21:58:49.359: INFO: stdout: "" Mar 7 21:58:49.360: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4924 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' Mar 7 21:58:49.439: INFO: stderr: "" Mar 7 21:58:49.439: INFO: stdout: "" [FAILED] Timed out after 300 seconds waiting for name=update-demo pods to reach valid state In [It] at: test/e2e/kubectl/kubectl.go:2350 @ 03/07/23 21:58:49.439 < Exit [It] should scale a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:351 @ 03/07/23 21:58:49.439 (5m0.945s) > Enter [AfterEach] [sig-cli] Kubectl client - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:58:49.439 Mar 7 21:58:49.439: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-cli] Kubectl client - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:58:49.452 (13ms) > Enter [DeferCleanup (Each)] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:58:49.452 < Exit [DeferCleanup (Each)] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:58:49.452 (0s) > Enter [DeferCleanup (Each)] [sig-cli] Kubectl client - dump namespaces | framework.go:209 @ 03/07/23 21:58:49.452 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:58:49.452 STEP: Collecting events from namespace "kubectl-4924". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:58:49.452 STEP: Found 20 events. 
- test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:58:49.456 Mar 7 21:58:49.456: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for update-demo-nautilus-6jlhk: { } Scheduled: Successfully assigned kubectl-4924/update-demo-nautilus-6jlhk to 172.17.0.1 Mar 7 21:58:49.456: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for update-demo-nautilus-tlg8n: { } Scheduled: Successfully assigned kubectl-4924/update-demo-nautilus-tlg8n to 172.17.0.1 Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:49 +0000 UTC - event for update-demo-nautilus: {replication-controller } SuccessfulCreate: Created pod: update-demo-nautilus-6jlhk Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:49 +0000 UTC - event for update-demo-nautilus: {replication-controller } SuccessfulCreate: Created pod: update-demo-nautilus-tlg8n Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:51 +0000 UTC - event for update-demo-nautilus-6jlhk: {kubelet 172.17.0.1} Pulling: Pulling image "registry.k8s.io/e2e-test-images/nautilus:1.7" Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:51 +0000 UTC - event for update-demo-nautilus-tlg8n: {kubelet 172.17.0.1} Pulling: Pulling image "registry.k8s.io/e2e-test-images/nautilus:1.7" Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:53 +0000 UTC - event for update-demo-nautilus-tlg8n: {kubelet 172.17.0.1} Failed: Error: failed to get sandbox container task: no running task found: task 996d605137afecb3c48080e95e94bef0c6171da3367bb97aabe86cb7defa4253 not found: not found Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:53 +0000 UTC - event for update-demo-nautilus-tlg8n: {kubelet 172.17.0.1} Pulled: Successfully pulled image "registry.k8s.io/e2e-test-images/nautilus:1.7" in 2.5217072s (2.521811236s including waiting) Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:54 +0000 UTC - event for update-demo-nautilus-6jlhk: {kubelet 172.17.0.1} Pulled: Successfully pulled image "registry.k8s.io/e2e-test-images/nautilus:1.7" in 108.447806ms (2.629768128s including waiting) Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:54 +0000 UTC - event for update-demo-nautilus-6jlhk: {kubelet 172.17.0.1} Failed: Error: failed to get sandbox container task: no running task found: task 27df991ccfa506d5d48747585f86d911258a9ac14e0357ed51114df38a941820 not found: not found Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:54 +0000 UTC - event for update-demo-nautilus-6jlhk: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:54 +0000 UTC - event for update-demo-nautilus-tlg8n: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:56 +0000 UTC - event for update-demo-nautilus-6jlhk: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/nautilus:1.7" already present on machine Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:56 +0000 UTC - event for update-demo-nautilus-6jlhk: {kubelet 172.17.0.1} Started: Started container update-demo Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:56 +0000 UTC - event for update-demo-nautilus-6jlhk: {kubelet 172.17.0.1} Created: Created container update-demo Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:56 +0000 UTC - event for update-demo-nautilus-tlg8n: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/nautilus:1.7" already present on machine Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:56 +0000 UTC - event for update-demo-nautilus-tlg8n: {kubelet 172.17.0.1} Created: Created container update-demo Mar 7 21:58:49.456: INFO: At 2023-03-07 21:53:56 +0000 UTC - event for update-demo-nautilus-tlg8n: {kubelet 172.17.0.1} Started: Started container update-demo Mar 7 21:58:49.456: INFO: At 2023-03-07 21:54:03 +0000 UTC - event for update-demo-nautilus-6jlhk: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container update-demo in pod update-demo-nautilus-6jlhk_kubectl-4924(379eda4f-7b5a-4e4b-ab74-41cb2349a14a) Mar 7 21:58:49.456: INFO: At 2023-03-07 21:54:04 +0000 UTC - event for update-demo-nautilus-tlg8n: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container update-demo in pod update-demo-nautilus-tlg8n_kubectl-4924(6b737796-17d3-4547-91d8-d84b048886ea) Mar 7 21:58:49.459: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:58:49.459: INFO: update-demo-nautilus-6jlhk 172.17.0.1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:53:49 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:56:44 +0000 UTC ContainersNotReady containers with unready status: [update-demo]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:56:44 +0000 UTC ContainersNotReady containers with unready status: [update-demo]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:53:49 +0000 UTC }] Mar 7 21:58:49.459: INFO: update-demo-nautilus-tlg8n 172.17.0.1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:53:49 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:55:23 +0000 UTC ContainersNotReady containers with unready status: [update-demo]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:55:23 +0000 UTC ContainersNotReady containers with unready status: [update-demo]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-07 21:53:49 +0000 UTC }] Mar 7 21:58:49.459: INFO: Mar 7 21:58:49.459: INFO: update-demo-nautilus-tlg8n[kubectl-4924].container[update-demo]=sandbox container "9775a4fc4864d32bb15a6eb40ee24f818dfc3ce3dcb3efe03223e399bba3d9f8" is not running Mar 7 21:58:49.490: INFO: Logging node info for node 172.17.0.1 Mar 7 21:58:49.492: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 7855 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update 
v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:54:08 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:54:08 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:54:08 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:54:08 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:54:08 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 
registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:58:49.493: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:58:49.496: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:58:49.502: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:58:49.502: INFO: Container coredns ready: false, restart count 16 Mar 7 21:58:49.502: INFO: update-demo-nautilus-tlg8n started at 2023-03-07 21:53:49 +0000 UTC (0+1 container statuses recorded) Mar 7 21:58:49.502: INFO: Container update-demo ready: false, restart count 5 Mar 7 21:58:49.502: INFO: update-demo-nautilus-6jlhk started at 2023-03-07 21:53:49 +0000 UTC (0+1 container statuses recorded) Mar 7 21:58:49.502: INFO: Container update-demo ready: false, restart count 5 Mar 7 21:58:49.539: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:58:49.539 (87ms) < Exit [DeferCleanup (Each)] [sig-cli] Kubectl client - dump namespaces | framework.go:209 @ 03/07/23 21:58:49.539 (87ms) > Enter [DeferCleanup (Each)] [sig-cli] Kubectl client - tear down framework | framework.go:206 @ 03/07/23 21:58:49.539 STEP: Destroying namespace "kubectl-4924" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 21:58:49.539 < Exit [DeferCleanup (Each)] [sig-cli] Kubectl client - tear down framework | framework.go:206 @ 03/07/23 21:58:49.547 (8ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:58:49.547 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:58:49.547 (0s)
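The goroutine dump above flattens the wait loop from test/e2e/kubectl/kubectl.go: every 5 seconds it lists the pods behind the name=update-demo selector, then probes each pod with a kubectl Go template (note that exists is a kubectl template extension, not stock text/template) that prints "true" only when the update-demo container is in state running; the empty stdout lines in the log are the "created but not running" case. A minimal standalone sketch of that pattern, shelling out to kubectl the same way — the function name, the replica-count guard, and the hard-coded values are ours for illustration, not the framework's exact code:

// Poll kubectl the way validateController does: list pods by selector,
// then probe each pod's containerStatuses with a template that emits
// "true" only for a running "update-demo" container.
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// Same template the failing test passes to kubectl; "exists" is provided
// by kubectl's template printer, so it must be evaluated by kubectl.
const checkTemplate = `{{if (exists . "status" "containerStatuses")}}` +
	`{{range .status.containerStatuses}}` +
	`{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}` +
	`{{end}}{{end}}`

func waitForRunning(ns, selector string, replicas int, timeout time.Duration) error {
	listTemplate := `{{range .items}}{{.metadata.name}} {{end}}`
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		// List the pod names behind the label selector.
		out, err := exec.Command("kubectl", "-n", ns, "get", "pods",
			"-o", "template", "--template", listTemplate, "-l", selector).Output()
		if err != nil {
			return err
		}
		pods := strings.Fields(string(out))
		if len(pods) != replicas {
			continue // controller has not settled on the right pod count yet
		}
		allRunning := true
		for _, pod := range pods {
			// Probe one pod: stdout contains "true" iff the container is running.
			probe, err := exec.Command("kubectl", "-n", ns, "get", "pods", pod,
				"-o", "template", "--template", checkTemplate).Output()
			if err != nil {
				return err
			}
			if !strings.Contains(string(probe), "true") {
				fmt.Printf("%s is created but not running\n", pod)
				allRunning = false
			}
		}
		if allRunning {
			return nil
		}
	}
	return fmt.Errorf("timed out waiting for %s pods to reach valid state", selector)
}

func main() {
	if err := waitForRunning("kubectl-4924", "name=update-demo", 2, 5*time.Minute); err != nil {
		fmt.Println(err)
	}
}

Here the probe never prints "true": per the events collected above, both nautilus containers were crash-looping (BackOff restarting failed container), not merely slow to start, so no amount of polling within the 300-second budget could succeed.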
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\slogs\slogs\sshould\sbe\sable\sto\sretrieve\sand\sfilter\slogs\s\s\[Conformance\]$'
[FAILED] Pod logs-generator was not ready In [It] at: test/e2e/kubectl/logs.go:118 @ 03/07/23 21:21:58.485 from junit_01.xml
> Enter [BeforeEach] [sig-cli] Kubectl logs - set up framework | framework.go:191 @ 03/07/23 21:16:58.363 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:16:58.363 Mar 7 21:16:58.363: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename kubectl-logs - test/e2e/framework/framework.go:250 @ 03/07/23 21:16:58.364 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:16:58.376 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:16:58.38 < Exit [BeforeEach] [sig-cli] Kubectl logs - set up framework | framework.go:191 @ 03/07/23 21:16:58.384 (21ms) > Enter [BeforeEach] [sig-cli] Kubectl logs - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:16:58.384 < Exit [BeforeEach] [sig-cli] Kubectl logs - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:16:58.384 (0s) > Enter [BeforeEach] [sig-cli] Kubectl logs - test/e2e/kubectl/logs.go:79 @ 03/07/23 21:16:58.384 < Exit [BeforeEach] [sig-cli] Kubectl logs - test/e2e/kubectl/logs.go:79 @ 03/07/23 21:16:58.384 (0s) > Enter [BeforeEach] logs - test/e2e/kubectl/logs.go:94 @ 03/07/23 21:16:58.384 STEP: creating an pod - test/e2e/kubectl/logs.go:95 @ 03/07/23 21:16:58.384 Mar 7 21:16:58.384: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-logs-5650 run logs-generator --image=registry.k8s.io/e2e-test-images/agnhost:2.43 --restart=Never --pod-running-timeout=2m0s -- logs-generator --log-lines-total 100 --run-duration 20s' Mar 7 21:16:58.483: INFO: stderr: "" Mar 7 21:16:58.483: INFO: stdout: "pod/logs-generator created\n" < Exit [BeforeEach] logs - test/e2e/kubectl/logs.go:94 @ 03/07/23 21:16:58.483 (100ms) > Enter [It] should be able to retrieve and filter logs [Conformance] - test/e2e/kubectl/logs.go:114 @ 03/07/23 21:16:58.483 STEP: Waiting for log generator to start. - test/e2e/kubectl/logs.go:116 @ 03/07/23 21:16:58.483 Mar 7 21:16:58.483: INFO: Waiting up to 5m0s for 1 pods to be running and ready, or succeeded: [logs-generator] Automatically polling progress: [sig-cli] Kubectl logs logs should be able to retrieve and filter logs [Conformance] (Spec Runtime: 5m0.121s) test/e2e/kubectl/logs.go:114 In [It] (Node Runtime: 5m0.001s) test/e2e/kubectl/logs.go:114 At [By Step] Waiting for log generator to start. (Step Runtime: 5m0.001s) test/e2e/kubectl/logs.go:116 Spec Goroutine goroutine 2561 [runnable] time.appendInt({0xc003dc1a80?, 0x7?, 0x40?}, 0x15?, 0x2?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/time/format.go:399
time.Time.appendFormat({0x66077a0?, 0xc003dc19e0?, 0xad00e80?}, {0xc003dc1a80, 0x0, 0x40}, {0x758a9fc, 0x13})
  /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/time/format.go:726
time.Time.AppendFormat({0x2?, 0x26081fe?, 0xad00e80?}, {0xc003dc1a80, 0x0, 0x40}, {0x758a9fc, 0x13})
  /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/time/format.go:644
time.Time.Format({0x4?, 0x71?, 0xad00e80?}, {0x758a9fc?, 0x0?})
  /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/time/format.go:630
k8s.io/kubernetes/test/e2e/framework.nowStamp()
  test/e2e/framework/log.go:27
k8s.io/kubernetes/test/e2e/framework.log({0x754a9c8, 0x4}, {0x75d1bc6, 0x1d}, {0xc003dc1c80, 0x2, 0x2})
  test/e2e/framework/log.go:31
k8s.io/kubernetes/test/e2e/framework.Logf(...)
  test/e2e/framework/log.go:36
k8s.io/kubernetes/test/e2e/framework/pod.checkPodsCondition({0x7f07c801c938?, 0xc0030ae0c0}, {0x7f62698?, 0xc00377cb60}, {0xc002b670f8, 0x11}, {0xc00304e430, 0x1, 0x1}, 0x45d964b800, ...)
  test/e2e/framework/pod/resource.go:431
k8s.io/kubernetes/test/e2e/framework/pod.CheckPodsRunningReadyOrSucceeded(...)
  test/e2e/framework/pod/resource.go:406
> k8s.io/kubernetes/test/e2e/kubectl.glob..func2.3.3({0x7f07c801c938, 0xc0030ae0c0})
  test/e2e/kubectl/logs.go:117
  |
  | ginkgo.By("Waiting for log generator to start.")
  > if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) {
  | framework.Failf("Pod %s was not ready", podName)
  | }
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc0030ae0c0})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841
Begin Additional Progress Reports >>
  The matcher passed to Eventually returned the following error:
  want pod 'logs-generator' on '172.17.0.1' to be 'Running' but was 'Failed'
  <*errors.errorString | 0xc00312ee70>: {
      s: "want pod 'logs-generator' on '172.17.0.1' to be 'Running' but was 'Failed'",
  }
<< End Additional Progress Reports
Mar 7 21:21:58.484: INFO: Pod logs-generator failed to be running and ready, or succeeded.
Mar 7 21:21:58.485: INFO: Wanted all 1 pods to be running and ready, or succeeded. Result: false.
Pods: [logs-generator] [FAILED] Pod logs-generator was not ready In [It] at: test/e2e/kubectl/logs.go:118 @ 03/07/23 21:21:58.485 < Exit [It] should be able to retrieve and filter logs [Conformance] - test/e2e/kubectl/logs.go:114 @ 03/07/23 21:21:58.485 (5m0.002s) > Enter [AfterEach] logs - test/e2e/kubectl/logs.go:99 @ 03/07/23 21:21:58.485 Mar 7 21:21:58.485: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-logs-5650 delete pod logs-generator' Mar 7 21:21:58.583: INFO: stderr: "" Mar 7 21:21:58.583: INFO: stdout: "pod \"logs-generator\" deleted\n" < Exit [AfterEach] logs - test/e2e/kubectl/logs.go:99 @ 03/07/23 21:21:58.583 (98ms) > Enter [AfterEach] [sig-cli] Kubectl logs - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:21:58.583 Mar 7 21:21:58.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-cli] Kubectl logs - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:21:58.587 (3ms) > Enter [DeferCleanup (Each)] [sig-cli] Kubectl logs - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:21:58.587 < Exit [DeferCleanup (Each)] [sig-cli] Kubectl logs - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:21:58.587 (0s) > Enter [DeferCleanup (Each)] [sig-cli] Kubectl logs - dump namespaces | framework.go:209 @ 03/07/23 21:21:58.587 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:21:58.587 STEP: Collecting events from namespace "kubectl-logs-5650". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:21:58.587 STEP: Found 4 events. - test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:21:58.589 Mar 7 21:21:58.589: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for logs-generator: { } Scheduled: Successfully assigned kubectl-logs-5650/logs-generator to 172.17.0.1 Mar 7 21:21:58.589: INFO: At 2023-03-07 21:17:00 +0000 UTC - event for logs-generator: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 7 21:21:58.589: INFO: At 2023-03-07 21:17:00 +0000 UTC - event for logs-generator: {kubelet 172.17.0.1} Created: Created container logs-generator Mar 7 21:21:58.589: INFO: At 2023-03-07 21:17:01 +0000 UTC - event for logs-generator: {kubelet 172.17.0.1} Started: Started container logs-generator Mar 7 21:21:58.592: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:21:58.592: INFO: Mar 7 21:21:58.595: INFO: Logging node info for node 172.17.0.1 Mar 7 21:21:58.597: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 2557 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:17:34 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:17:34 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:17:34 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:17:34 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:17:34 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db 
registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:21:58.597: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:21:58.600: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:21:58.616: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:21:58.616: INFO: Container coredns ready: false, restart count 9 Mar 7 21:21:58.646: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:21:58.646 (60ms) < Exit [DeferCleanup (Each)] [sig-cli] Kubectl logs - dump namespaces | framework.go:209 @ 03/07/23 21:21:58.647 (60ms) > Enter [DeferCleanup (Each)] [sig-cli] Kubectl logs - tear down framework | framework.go:206 @ 03/07/23 21:21:58.647 STEP: Destroying namespace "kubectl-logs-5650" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 21:21:58.647 < Exit [DeferCleanup (Each)] [sig-cli] Kubectl logs - tear down framework | framework.go:206 @ 03/07/23 21:21:58.652 (5ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:21:58.652 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:21:58.652 (0s)
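The stack above bottoms out in CheckPodsRunningReadyOrSucceeded, which gives the pod framework.PodStartTimeout (5m0s here) to become Running-and-Ready or Succeeded; the Eventually matcher report shows the pod instead landed in phase Failed. A rough client-go sketch of that gate, under the usual clientcmd setup — the helper name and the early exit on Failed are our simplification, not the framework's exact implementation (the framework polled the full five minutes):

// Wait for a pod to be Running (and Ready) or Succeeded, mirroring the
// readiness gate that produced the failure above.
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func podReadyOrSucceeded(ctx context.Context, c kubernetes.Interface, ns, name string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err == nil {
			switch pod.Status.Phase {
			case corev1.PodSucceeded:
				return nil // a short-lived generator that exited cleanly is fine
			case corev1.PodFailed:
				return fmt.Errorf("want pod %q to be 'Running' but was 'Failed'", name)
			case corev1.PodRunning:
				// Running alone is not enough; the Ready condition must be true.
				for _, cond := range pod.Status.Conditions {
					if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
						return nil
					}
				}
			}
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("pod %s was not ready", name)
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)
	if err := podReadyOrSucceeded(context.Background(), client, "kubectl-logs-5650", "logs-generator", 5*time.Minute); err != nil {
		fmt.Println(err)
	}
}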
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sDNS\sshould\sprovide\s\/etc\/hosts\sentries\sfor\sthe\scluster\s\[Conformance\]$'
[FAILED] timed out waiting for the condition In [It] at: test/e2e/network/dns_common.go:459 @ 03/07/23 22:24:20.306 from junit_01.xml
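The failure in the log below is on the read side: the test never gets at the probe pod's marker files because every read of the pod returns "the server is currently unable to handle the request". The probe side itself is the 600-iteration shell loop shown in the STEP lines that follow: resolve two names with getent hosts and write an OK marker file per name that resolves. A rough Go analogue of one probe pod's loop, substituting net.LookupHost for getent hosts (an assumption for illustration — the real probe images run the shell loop verbatim):

// Resolve each expected name once per second for up to 600 seconds and
// drop an OK marker file into /results for every successful lookup, the
// way the wheezy/jessie probe containers do with getent.
package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	names := []string{
		"dns-querier-1.dns-test-service.dns-6931.svc.cluster.local",
		"dns-querier-1",
	}
	for i := 0; i < 600; i++ {
		for _, name := range names {
			// getent hosts <name> succeeds iff the name resolves;
			// net.LookupHost plays the same role here.
			if addrs, err := net.LookupHost(name); err == nil && len(addrs) > 0 {
				_ = os.WriteFile("/results/hosts@"+name, []byte("OK\n"), 0o644)
			}
		}
		time.Sleep(1 * time.Second)
	}
	fmt.Println("probe loop finished")
}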
> Enter [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/07/23 22:13:43.951 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 22:13:43.951 Mar 7 22:13:43.951: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename dns - test/e2e/framework/framework.go:250 @ 03/07/23 22:13:43.952 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 22:13:43.966 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 22:13:43.97 < Exit [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/07/23 22:13:43.978 (27ms) > Enter [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 22:13:43.978 < Exit [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 22:13:43.978 (0s) > Enter [It] should provide /etc/hosts entries for the cluster [Conformance] - test/e2e/network/dns.go:117 @ 03/07/23 22:13:43.978 STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-6931.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;sleep 1; done - test/e2e/network/dns.go:123 @ 03/07/23 22:13:43.978 STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-6931.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;sleep 1; done - test/e2e/network/dns.go:124 @ 03/07/23 22:13:43.978 STEP: creating a pod to probe /etc/hosts - test/e2e/network/dns.go:127 @ 03/07/23 22:13:43.978 STEP: submitting the pod to kubernetes - test/e2e/network/dns_common.go:496 @ 03/07/23 22:13:43.978 STEP: retrieving the pod - test/e2e/network/dns_common.go:508 @ 03/07/23 22:13:48 STEP: looking for the results for each expected name from probers - test/e2e/network/dns_common.go:514 @ 03/07/23 22:13:48.003 Mar 7 22:13:51.075: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:13:51.086: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local] Mar 7 22:13:56.091: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:13:56.095: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:13:56.098: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods 
dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:13:56.101: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:13:56.101: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
[... the same four lookups kept failing with "the server is currently unable to handle the request" on every retry between 22:14:01 and 22:16:56 ...]
Mar 7 22:16:56.103: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 
jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1] Mar 7 22:17:04.163: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:07.239: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:07.243: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:07.247: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:07.247: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1] Mar 7 22:17:11.091: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:11.095: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:11.098: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:11.102: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:11.102: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1] Mar 7 22:17:19.171: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:19.175: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:19.178: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is 
currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:19.181: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:19.181: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1] Mar 7 22:17:51.090: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:54.151: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:57.219: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:57.223: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:17:57.223: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1] Mar 7 22:18:31.091: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:18:34.151: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:18:34.155: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:18:34.158: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:18:34.158: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1] Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 5m0.027s) test/e2e/network/dns.go:117 In [It] (Node 
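Every entry above is the same failure repeating: the test's GET against the pod's proxy subresource (the "(get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)" suffix is client-go's description of that request) comes back with "the server is currently unable to handle the request", which is how apimachinery renders an HTTP 503 ServiceUnavailable from the API server. In other words, the probe pod may well be writing its result files, but the API server cannot proxy the read through to it. Schematically the request being retried is GET /api/v1/namespaces/dns-6931/pods/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0/proxy/results/<name>; the path layout here is assumed from the standard pod-proxy URL scheme, while the exact request construction is visible in the goroutine dump below.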
Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 5m0.027s)
  test/e2e/network/dns.go:117
  In [It] (Node Runtime: 5m0s) test/e2e/network/dns.go:117
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 4m55.975s) test/e2e/network/dns_common.go:514
  Spec Goroutine
  goroutine 8501 [select]
  k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000233500, 0xc002d2c800)
    vendor/golang.org/x/net/http2/transport.go:1269
  k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc002d2c800, {0xa0?})
    vendor/golang.org/x/net/http2/transport.go:561
  k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...)
    vendor/golang.org/x/net/http2/transport.go:513
  k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc002d2c800?)
    vendor/golang.org/x/net/http2/transport.go:3085
  net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc002d2c800)
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548
  net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc005812ae0?)
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17
  k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0059098e0, 0xc002d2c700)
    vendor/k8s.io/client-go/transport/round_trippers.go:168
  net/http.send(0xc002d2c700, {0x7ef2ae0, 0xc0059098e0}, {0x8?, 0x746c600?, 0x0?})
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252
  net/http.(*Client).send(0xc0058e3380, 0xc002d2c700, {0x100?, 0xc0003b8800?, 0x0?})
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176
  net/http.(*Client).do(0xc0058e3380, 0xc002d2c700)
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716
  net/http.(*Client).Do(...)
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582
  k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc001bfc240, {0x7f24250, 0xc005812960}, 0xc000bee3b0?)
    vendor/k8s.io/client-go/rest/request.go:999
  k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc001bfc240, {0x7f24250, 0xc005812960})
    vendor/k8s.io/client-go/rest/request.go:1039
  > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc00456c300?})
    test/e2e/network/dns_common.go:472
      |     Name(pod.Name).
      |     Suffix(fileDir, fileName).
      >     Do(ctx).Raw()
      |
      | if err != nil {
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc00456c300?}, 0x19?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc00456c300}, 0xc00509a168, 0x2f83f2a?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc00456c300}, 0x0?, 0x0?, 0x2?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc00456c300}, 0x0?, 0x0?, 0x0?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534
  > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc00456c300?}, {0xc00456c340?, 0x7f07c801c938?, 0xc00456c300?}, {0x7553820?, 0x2d?}, 0xc00200d340?, {0x7f62698, 0xc0058f49c0}, ...)
    test/e2e/network/dns_common.go:459
      | var failed []string
      |
      > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) {
      |     failed = []string{}
      |
  > k8s.io/kubernetes/test/e2e/network.assertFilesExist(...)
    test/e2e/network/dns_common.go:453
      |
      | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
      >     assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "")
      | }
      |
  > k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f07c801c938, 0xc00456c300}, 0xc000ca91d0, 0xc00455d680, {0xc00456c340, 0x4, 0x4})
    test/e2e/network/dns_common.go:515
      | // Try to find results for each expected name.
      | ginkgo.By("looking for the results for each expected name from probers")
      > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet)
      |
      | // TODO: probe from the host, too.
  > k8s.io/kubernetes/test/e2e/network.glob..func2.4({0x7f07c801c938, 0xc00456c300})
    test/e2e/network/dns.go:129
      | ginkgo.By("creating a pod to probe /etc/hosts")
      | pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
      > validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...))
      | })
      |
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc00456c300})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841
Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 5m20.029s)
  test/e2e/network/dns.go:117
  In [It] (Node Runtime: 5m20.002s) test/e2e/network/dns.go:117
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m15.976s) test/e2e/network/dns_common.go:514
  Spec Goroutine goroutine 8501 [select]: identical to the stack in the first report above; duplicate dump elided.
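Reading the goroutine bottom-up shows the shape of the wait: dns.go:129 creates the probe pod and calls validateDNSResults, which has assertFilesContain poll every 5 seconds for up to 600 seconds, and each attempt blocks in an HTTP/2 round trip against the API server. As a minimal sketch of that loop, reconstructed only from the frames above (waitForResults and readResultFromPod are illustrative names, not the test's own helpers):

package sketch

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// readResultFromPod is an illustrative stand-in for the request built in
// assertFilesContain.func1 (dns_common.go:472): a GET on the pod's proxy
// subresource fetching one result file written by the probe containers.
func readResultFromPod(ctx context.Context, c kubernetes.Interface, ns, pod, fileDir, fileName string) ([]byte, error) {
	return c.CoreV1().RESTClient().Get().
		Namespace(ns).
		Resource("pods").
		SubResource("proxy").
		Name(pod).
		Suffix(fileDir, fileName). // matches the Suffix(fileDir, fileName) frame in the dump
		Do(ctx).Raw()
}

// waitForResults mirrors the wait.PollImmediateWithContext call at
// dns_common.go:459: retry every 5s for up to 600s until every expected
// file can be read; a failed read just schedules another attempt.
func waitForResults(ctx context.Context, c kubernetes.Interface, ns, pod string, fileNames []string) error {
	return wait.PollImmediateWithContext(ctx, 5*time.Second, 600*time.Second,
		func(ctx context.Context) (bool, error) {
			var failed []string
			for _, name := range fileNames {
				if _, err := readResultFromPod(ctx, c, ns, pod, "results", name); err != nil {
					failed = append(failed, name)
				}
			}
			if len(failed) > 0 {
				fmt.Printf("Lookups failed for: %v\n", failed)
				return false, nil // not an error: keep polling until the 600s timeout
			}
			return true, nil
		})
}

Because the condition function swallows read errors and returns false, a persistent 503 never fails fast; the spec simply burns the whole 600-second budget, which matches the ten-plus minutes of retries in this log.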
Mar 7 22:19:06.091 / 22:19:09.159 / 22:19:12.227 / 22:19:15.299: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local, wheezy_hosts@dns-querier-1, jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local and jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:19:15.299: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 7 22:19:19.139: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:19:19.150: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local]
Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 5m40.031s)
  test/e2e/network/dns.go:117
  In [It] (Node Runtime: 5m40.004s) test/e2e/network/dns.go:117
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m35.979s) test/e2e/network/dns_common.go:514
  Spec Goroutine goroutine 8501 [select]: identical to the stack in the first report above; duplicate dump elided.
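The "Automatically polling progress" blocks interleaved with the lookup failures are Ginkgo v2 progress reports: once a spec runs past a configured threshold, Ginkgo dumps the spec's goroutine at a fixed interval, here every 20 seconds per the Spec Runtime stamps (presumably the suite's poll-progress-after / poll-progress-interval settings). Since this spec never leaves the select inside the HTTP/2 round trip, each report repeats the same stack, which is why the later dumps are elided above and below as duplicates.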
Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 6m0.032s)
  test/e2e/network/dns.go:117
  In [It] (Node Runtime: 6m0.005s) test/e2e/network/dns.go:117
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m55.98s) test/e2e/network/dns_common.go:514
  Spec Goroutine goroutine 8501 [select]: identical to the stack in the first report above; duplicate dump elided.
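For orientation on what those result files are: dns.go:129 (shown in the first goroutine dump) builds the probe pod from two shell probe commands, plus a webserver that serves the /results directory over HTTP, which is the endpoint the pod-proxy GETs are trying to reach. A rough sketch of such a pod, with illustrative image names and a deliberately simplified probe command (the real spec is assembled by createDNSPod in dns_common.go and differs in detail):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// probePod sketches the shape of the DNS probe pod: a querier container
// loops, resolving names and recording the answers under /results, and a
// webserver container exposes /results so the apiserver proxy can read it.
func probePod(ns string) *v1.Pod {
	// Simplified stand-in for wheezyProbeCmd/jessieProbeCmd: resolve one
	// name each second and record the answer under the file name polled for.
	probeCmd := `while true; do
  getent hosts dns-querier-1 > "/results/wheezy_hosts@dns-querier-1" 2>/dev/null
  sleep 1
done`
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "dns-test", Namespace: ns},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:         "querier",
					Image:        "registry.k8s.io/e2e-test-images/busybox:1.29-4", // illustrative tag
					Command:      []string{"sh", "-c", probeCmd},
					VolumeMounts: []v1.VolumeMount{{Name: "results", MountPath: "/results"}},
				},
				{
					Name:         "webserver",
					Image:        "registry.k8s.io/e2e-test-images/agnhost:2.43", // illustrative tag
					Args:         []string{"test-webserver"},
					Ports:        []v1.ContainerPort{{ContainerPort: 80}},
					VolumeMounts: []v1.VolumeMount{{Name: "results", MountPath: "/results"}},
				},
			},
			Volumes: []v1.Volume{{
				Name:         "results",
				VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
			}},
		},
	}
}

Given that layout, a 503 on every read points at the proxy path from apiserver to pod (or the webserver container) rather than at the DNS probes themselves.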
Mar 7 22:19:51.091 / 22:19:51.096 / 22:19:51.099 / 22:19:51.102: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local, wheezy_hosts@dns-querier-1, jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local and jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:19:51.102: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 7 22:19:56.090 / 22:19:56.094 / 22:19:56.097 / 22:19:56.100: INFO: Unable to read the same four names from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:19:56.100: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 6m20.034s)
  test/e2e/network/dns.go:117
  In [It] (Node Runtime: 6m20.007s) test/e2e/network/dns.go:117
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m15.982s) test/e2e/network/dns_common.go:514
  Spec Goroutine goroutine 8501 [select]: identical to the stack in the first report above; duplicate dump elided.
Mar 7 22:20:04.163 / 22:20:04.167 / 22:20:04.170 / 22:20:04.173: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local, wheezy_hosts@dns-querier-1, jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local and jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:20:04.173: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 6m40.036s)
  test/e2e/network/dns.go:117
  In [It] (Node Runtime: 6m40.009s) test/e2e/network/dns.go:117
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m35.983s) test/e2e/network/dns_common.go:514
  Spec Goroutine goroutine 8501 [select]: identical to the stack in the first report above; duplicate dump elided.
Mar 7 22:20:36.092 / 22:20:36.096 / 22:20:36.099 / 22:20:36.102: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local, wheezy_hosts@dns-querier-1, jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local and jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:20:36.102: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 7m0.038s)
  test/e2e/network/dns.go:117
  In [It] (Node Runtime: 7m0.011s) test/e2e/network/dns.go:117
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m55.986s) test/e2e/network/dns_common.go:514
  Spec Goroutine goroutine 8501 [select]: identical to the stack in the first report above; duplicate dump elided.
Mar 7 22:20:44.163 / 22:20:44.167 / 22:20:44.170 / 22:20:44.173: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local, wheezy_hosts@dns-querier-1, jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local and jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:20:44.173: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 7m20.04s)
  test/e2e/network/dns.go:117
  In [It] (Node Runtime: 7m20.013s) test/e2e/network/dns.go:117
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m15.987s) test/e2e/network/dns_common.go:514
  Spec Goroutine goroutine 8501 [select]: identical to the stack in the first report above; duplicate dump elided.
Mar 7 22:21:16.091: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:21:16.095: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:21:16.099: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:21:16.102: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:21:16.102: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 7 22:21:21.091: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:21:21.094: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:21:21.099: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:21:21.102: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:21:21.102: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 7m40.042s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 7m40.015s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m35.99s)
        test/e2e/network/dns_common.go:514

  Spec Goroutine
  goroutine 8501 [select]
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc00456c300}, 0xc00509a168, 0x2f83f2a?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:647
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc00456c300}, 0x0?, 0x0?, 0x2?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600
  k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc00456c300}, 0x0?, 0x0?, 0x0?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534
> k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc00456c300?}, {0xc00456c340?, 0x7f07c801c938?, 0xc00456c300?}, {0x7553820?, 0x2d?}, 0xc00200d340?, {0x7f62698, 0xc0058f49c0}, ...)
    test/e2e/network/dns_common.go:459
      |   var failed []string
      |
      >   framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) {
      |     failed = []string{}
      |
> k8s.io/kubernetes/test/e2e/network.assertFilesExist(...)
    test/e2e/network/dns_common.go:453
      |
      | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
      >   assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "")
      | }
      |
> k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f07c801c938, 0xc00456c300}, 0xc000ca91d0, 0xc00455d680, {0xc00456c340, 0x4, 0x4})
    test/e2e/network/dns_common.go:515
      |   // Try to find results for each expected name.
      |   ginkgo.By("looking for the results for each expected name from probers")
      >   assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet)
      |
      |   // TODO: probe from the host, too.
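This second trace catches goroutine 8501 in the other state it alternates through: parked in the poll timer at wait.go:647 between requests rather than inside an HTTP round trip. The dns_common.go:459 excerpt also explains the ten-minute wall-clock cost of each failing lookup set: the whole file check is wrapped in a 5-second/600-second poll, so a pod whose files can never be read pins the spec for the full timeout before framework.ExpectNoError fires. A condensed sketch of that pattern, using the same wait helper (later apimachinery releases deprecate it in favor of PollUntilContextTimeout):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// checkOnce stands in for one round of per-file proxy GETs; hypothetical.
func checkOnce(ctx context.Context) bool { return false }

func main() {
	ctx := context.Background()
	// Run the condition immediately, then every 5s, for at most 600s —
	// the interval/timeout pair visible at dns_common.go:459.
	err := wait.PollImmediateWithContext(ctx, 5*time.Second, 600*time.Second,
		func(ctx context.Context) (bool, error) {
			// (false, nil) means "not yet, keep polling"; a non-nil
			// error would abort the poll early instead.
			return checkOnce(ctx), nil
		})
	// After 600s of (false, nil) the helper returns a timeout error,
	// which ExpectNoError then converts into the spec failure.
	fmt.Println(err)
}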
> k8s.io/kubernetes/test/e2e/network.glob..func2.4({0x7f07c801c938, 0xc00456c300})
    test/e2e/network/dns.go:129
      |   ginkgo.By("creating a pod to probe /etc/hosts")
      |   pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
      >   validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...))
      | })
      |
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc00456c300})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
  k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 8m0.044s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 8m0.017s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m55.991s)
        test/e2e/network/dns_common.go:514

  Spec Goroutine
  goroutine 8501 [select]
    (stack unchanged from the HTTP round-trip trace above; only request pointers differ)

Mar 7 22:21:56.091: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:21:59.171: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:02.243: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 8m20.046s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 8m20.019s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m15.993s)
        test/e2e/network/dns_common.go:514

  Spec Goroutine
  goroutine 8501 [select]
    (stack unchanged from the HTTP round-trip trace above; only request pointers differ)

Mar 7 22:22:05.315: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:05.315: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 7 22:22:06.091: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:06.095: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:06.098: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:06.102: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:06.102: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 7 22:22:14.147: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:14.151: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:14.154: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:14.157: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:14.157: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 8m40.048s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 8m40.021s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m35.995s)
        test/e2e/network/dns_common.go:514

  Spec Goroutine
  goroutine 8501 [select]
    (stack unchanged from the HTTP round-trip trace above; only request pointers differ)
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 9m0.049s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 9m0.022s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m55.997s)
        test/e2e/network/dns_common.go:514

  Spec Goroutine
  goroutine 8501 [select]
    (stack unchanged from the HTTP round-trip trace above; only request pointers differ)

Mar 7 22:22:46.092: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:46.099: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:46.106: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:46.111: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:46.111: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 7 22:22:54.151: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:57.219: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:57.223: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:57.227: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:22:57.227: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 7 22:23:01.090: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:23:01.095: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:23:01.099: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:23:01.102: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:23:01.102: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 9m20.051s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 9m20.024s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 9m15.998s)
        test/e2e/network/dns_common.go:514

  Spec Goroutine
  goroutine 8501 [select]
    (stack unchanged from the 7m40s poll-wait trace above)
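Every failure line carries the same status message, "the server is currently unable to handle the request", which is how apimachinery renders an HTTP 503 ServiceUnavailable status. Code that wants to branch on that condition, rather than string-match log text, can use client-go's typed error helper; a small sketch:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// classify distinguishes the 503s seen throughout this log from other
// request failures. IsServiceUnavailable matches the StatusReason
// "ServiceUnavailable", i.e. exactly the "currently unable to handle
// the request" message above.
func classify(err error) string {
	switch {
	case err == nil:
		return "file read succeeded"
	case apierrors.IsServiceUnavailable(err):
		return "503 from the apiserver path; worth retrying"
	default:
		return fmt.Sprintf("other failure: %v", err)
	}
}

func main() {
	fmt.Println(classify(nil))
}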
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 9m40.052s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 9m40.025s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 9m35.999s)
        test/e2e/network/dns_common.go:514

  Spec Goroutine
  goroutine 8501 [select]
    (stack unchanged from the HTTP round-trip trace above; only request pointers differ)

Mar 7 22:23:36.091: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:23:39.171: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:23:39.175: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:23:39.178: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0)
Mar 7 22:23:39.179: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1]

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 10m0.054s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 10m0.027s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 9m56.002s)
        test/e2e/network/dns_common.go:514

  Spec Goroutine
  goroutine 8501 [select]
    (stack unchanged from the HTTP round-trip trace above; only request pointers differ)
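The snapshots themselves arrive on a fixed 20-second cadence (7m20s, 7m40s, 8m0s, …) because the suite runs with Ginkgo v2's automatic progress polling enabled. In a standalone suite the same reports can be requested per spec with decorators; a sketch, assuming a Ginkgo v2 release that ships the PollProgressAfter/PollProgressInterval decorators and SpecContext bodies:

package network_test

import (
	"time"

	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.Describe("slow DNS checks", func() {
	// Ask Ginkgo to dump a progress report (current By step plus the
	// spec goroutine's stack, as seen above) once the node has run for
	// 1m, and again every 20s after that.
	ginkgo.It("waits for prober results",
		ginkgo.PollProgressAfter(time.Minute),
		ginkgo.PollProgressInterval(20*time.Second),
		func(ctx ginkgo.SpecContext) {
			// test body elided
		})
})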
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 10m20.056s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 10m20.029s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 10m16.003s)
        test/e2e/network/dns_common.go:514

  Spec Goroutine
  goroutine 8501 [select, 2 minutes]
    (stack unchanged from the HTTP round-trip trace above; only request pointers differ)
> k8s.io/kubernetes/test/e2e/network.glob..func2.4({0x7f07c801c938, 0xc00456c300}) test/e2e/network/dns.go:129 | ginkgo.By("creating a pod to probe /etc/hosts") | pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) > validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc00456c300}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 22:24:11.091: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:24:14.151: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:24:17.219: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:24:20.291: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:24:20.291: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1] Mar 7 22:24:20.295: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:24:20.299: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:24:20.302: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:24:20.306: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: the server is currently unable to handle the request (get pods dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0) Mar 7 22:24:20.306: INFO: Lookups using dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-6931.svc.cluster.local jessie_hosts@dns-querier-1] Mar 7 22:24:20.306: INFO: 
Unexpected error: <*errors.errorString | 0xaaf3910>: { s: "timed out waiting for the condition", } [FAILED] timed out waiting for the condition In [It] at: test/e2e/network/dns_common.go:459 @ 03/07/23 22:24:20.306 < Exit [It] should provide /etc/hosts entries for the cluster [Conformance] - test/e2e/network/dns.go:117 @ 03/07/23 22:24:20.306 (10m36.329s) > Enter [AfterEach] [sig-network] DNS - test/e2e/framework/node/init/init.go:33 @ 03/07/23 22:24:20.306 Mar 7 22:24:20.306: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-network] DNS - test/e2e/framework/node/init/init.go:33 @ 03/07/23 22:24:20.31 (3ms) > Enter [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns_common.go:498 @ 03/07/23 22:24:20.31 STEP: deleting the pod - test/e2e/network/dns_common.go:499 @ 03/07/23 22:24:20.31 < Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns_common.go:498 @ 03/07/23 22:24:20.322 (12ms) > Enter [DeferCleanup (Each)] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 22:24:20.322 < Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 22:24:20.322 (0s) > Enter [DeferCleanup (Each)] [sig-network] DNS - dump namespaces | framework.go:209 @ 03/07/23 22:24:20.322 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 22:24:20.322 STEP: Collecting events from namespace "dns-6931". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 22:24:20.322 STEP: Found 14 events. - test/e2e/framework/debug/dump.go:46 @ 03/07/23 22:24:20.325 Mar 7 22:24:20.325: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: { } Scheduled: Successfully assigned dns-6931/dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0 to 172.17.0.1 Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:46 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:46 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} Created: Created container webserver Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:46 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} Started: Started container webserver Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:46 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:46 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} Created: Created container querier Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:46 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} Started: Started container querier Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:46 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7" already present on machine Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:46 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} Created: Created container jessie-querier Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:46 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: 
{kubelet 172.17.0.1} Started: Started container jessie-querier Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:47 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:53 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container webserver in pod dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0_dns-6931(d8cfad67-4dd7-4ef1-a74d-42a7cbf1e81d) Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:53 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container querier in pod dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0_dns-6931(d8cfad67-4dd7-4ef1-a74d-42a7cbf1e81d) Mar 7 22:24:20.325: INFO: At 2023-03-07 22:13:53 +0000 UTC - event for dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container jessie-querier in pod dns-test-6bae8bab-8403-42be-9403-db0f5474c9e0_dns-6931(d8cfad67-4dd7-4ef1-a74d-42a7cbf1e81d) Mar 7 22:24:20.328: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 22:24:20.328: INFO: Mar 7 22:24:20.331: INFO: Logging node info for node 172.17.0.1 Mar 7 22:24:20.334: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 12271 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 22:19:36 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 22:19:36 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient 
memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 22:19:36 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 22:19:36 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 22:19:36 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 22:24:20.334: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 22:24:20.338: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 22:24:20.357: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 22:24:20.357: INFO: Container coredns ready: false, restart count 21 Mar 7 22:24:20.392: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 22:24:20.392 (70ms) < Exit [DeferCleanup (Each)] [sig-network] DNS - dump namespaces | framework.go:209 @ 03/07/23 22:24:20.392 (70ms) > Enter 
[DeferCleanup (Each)] [sig-network] DNS - tear down framework | framework.go:206 @ 03/07/23 22:24:20.392 STEP: Destroying namespace "dns-6931" for this suite. - test/e2e/framework/framework.go:351 @ 03/07/23 22:24:20.392 < Exit [DeferCleanup (Each)] [sig-network] DNS - tear down framework | framework.go:206 @ 03/07/23 22:24:20.398 (6ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 22:24:20.398 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 22:24:20.398 (0s)
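The failure above has a single shape: assertFilesContain polls the probe pod's results files every 5 seconds for up to 600 seconds (the wait.PollImmediateWithContext call at dns_common.go:459), each read goes through the API server and comes back 503 ("the server is currently unable to handle the request"), and once the 10-minute budget runs out the poll surfaces the generic "timed out waiting for the condition". Below is a minimal sketch of that pattern, assuming the request is built against the pods/proxy subresource (consistent with the Name(pod.Name).Suffix(fileDir, fileName).Do(ctx).Raw() chain in the trace); the package and helper names are illustrative, not the e2e framework's own code.

package dnssketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// readResultsFile fetches /<fileDir>/<fileName> from the webserver container
// in the probe pod via the API server's pods/proxy subresource -- the
// Name(pod.Name).Suffix(fileDir, fileName).Do(ctx).Raw() chain visible in
// the stack trace. A 503 from the API server surfaces here as err.
func readResultsFile(ctx context.Context, client kubernetes.Interface, pod *v1.Pod, fileDir, fileName string) ([]byte, error) {
	return client.CoreV1().RESTClient().Get().
		Namespace(pod.Namespace).
		Resource("pods").
		SubResource("proxy").
		Name(pod.Name).
		Suffix(fileDir, fileName).
		Do(ctx).Raw()
}

// waitForFiles mirrors assertFilesContain's outer loop: retry every 5s for
// up to 600s until every results file is readable and non-empty. When the
// budget expires, wait returns the generic "timed out waiting for the
// condition" error reported by this spec.
func waitForFiles(ctx context.Context, client kubernetes.Interface, pod *v1.Pod, fileNames []string) error {
	return wait.PollImmediateWithContext(ctx, 5*time.Second, 600*time.Second, func(ctx context.Context) (bool, error) {
		var failed []string
		for _, name := range fileNames {
			if contents, err := readResultsFile(ctx, client, pod, "results", name); err != nil || len(contents) == 0 {
				failed = append(failed, name)
			}
		}
		return len(failed) == 0, nil // returning a nil error keeps the poll going
	})
}

Note that the condition returns (false, nil) on failed reads, so transient API errors never abort the poll; only the timeout does, which is why the spec runs the full 10m36s before failing.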
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sDNS\sshould\sprovide\sDNS\sfor\sExternalName\sservices\s\[Conformance\]$'
[FAILED] timed out waiting for the condition In [It] at: test/e2e/network/dns_common.go:459 @ 03/07/23 21:51:43.651 from junit_01.xml
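The trace that follows shows the targeted variant of the same loop: validateTargetedProbeOutput (dns_common.go:542) calls assertFilesContain with check=true and the expected value "foo.example.com.", so each results file must not only exist but also hold the CNAME target that the in-pod dig loop writes. A sketch of that per-file check, assuming a trim-and-compare on the file body (the exact comparison and the helper name are assumptions, not the framework's code):

package dnssketch

import "strings"

// fileHasExpected is an illustrative version of the per-file check in the
// targeted probe path: an empty file means the in-pod dig loop has produced
// no answer yet, and with check=true the body must also equal the expected
// value ("foo.example.com." in this spec).
func fileHasExpected(contents []byte, check bool, expected string) bool {
	body := strings.TrimSpace(string(contents))
	if body == "" {
		return false // no result written yet
	}
	if check && body != expected {
		return false // result present but not the expected CNAME target
	}
	return true
}

As in the /etc/hosts spec, every proxy read here fails with a 503 before the contents can be compared, so the check never gets past the read.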
> Enter [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/07/23 21:40:56.391 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:40:56.391 Mar 7 21:40:56.391: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename dns - test/e2e/framework/framework.go:250 @ 03/07/23 21:40:56.392 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:40:56.402 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:40:56.405 < Exit [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/07/23 21:40:56.408 (18ms) > Enter [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:40:56.408 < Exit [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:40:56.409 (0s) > Enter [It] should provide DNS for ExternalName services [Conformance] - test/e2e/network/dns.go:329 @ 03/07/23 21:40:56.409 STEP: Creating a test externalName service - test/e2e/network/dns.go:331 @ 03/07/23 21:40:56.409 STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-4413.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local; sleep 1; done - test/e2e/network/dns.go:345 @ 03/07/23 21:40:56.413 STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-4413.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local; sleep 1; done - test/e2e/network/dns.go:346 @ 03/07/23 21:40:56.413 STEP: creating a pod to probe DNS - test/e2e/network/dns.go:349 @ 03/07/23 21:40:56.413 STEP: submitting the pod to kubernetes - test/e2e/network/dns_common.go:523 @ 03/07/23 21:40:56.413 STEP: retrieving the pod - test/e2e/network/dns_common.go:535 @ 03/07/23 21:41:00.432 STEP: looking for the results for each expected name from probers - test/e2e/network/dns_common.go:541 @ 03/07/23 21:41:00.435 Mar 7 21:41:10.563: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:41:13.635: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:41:13.635: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:41:21.703: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:41:21.707: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:41:21.707: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: 
[wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:41:53.641: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:41:53.645: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:41:53.645: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:42:01.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:42:01.704: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:42:01.704: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:42:33.641: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:42:33.645: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:42:33.645: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:42:41.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:42:41.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:42:41.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:43:13.642: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:43:13.645: INFO: Unable to read 
jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:43:13.645: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:43:21.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:43:21.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:43:21.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:43:53.643: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:43:53.646: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:43:53.646: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:44:01.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:44:01.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:44:01.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:44:33.641: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:44:33.644: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:44:33.644: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:44:41.699: 
INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:44:41.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:44:41.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:45:13.640: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:45:13.645: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:45:13.645: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:45:21.703: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:45:21.707: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:45:21.707: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:45:53.640: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:45:53.646: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:45:53.646: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 5m0.019s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 5m0.001s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 4m55.974s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 
0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:647 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:46:01.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:46:01.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:46:01.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 5m20.021s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 5m20.003s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m15.976s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc0017f6e00) vendor/golang.org/x/net/http2/transport.go:1269 
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc0017f6e00, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc0017f6e00?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc0017f6e00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc0030e5320?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc0017f6d00) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc0017f6d00, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc0017f6d00, {0x100?, 0xc000101c00?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc0017f6d00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0029e26c0, {0x7f24250, 0xc0030e51a0}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0029e26c0, {0x7f24250, 0xc0030e51a0}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) 
test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:46:33.641: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:46:33.645: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:46:33.645: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 5m40.022s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 5m40.005s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m35.978s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:647 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) 
test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:46:41.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:46:41.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:46:41.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 6m0.024s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 6m0.006s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m55.979s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc00061ac00) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc00061ac00, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc00061ac00?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc00061ac00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc0030e4810?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc00061a600) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc00061a600, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc00061a600, {0x100?, 0xc000101c00?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc00061a600) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc000e42240, {0x7f24250, 0xc0030e4660}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc000e42240, {0x7f24250, 0xc0030e4660}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:47:13.641: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:47:13.645: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:47:13.645: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 6m20.026s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 6m20.008s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m15.981s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:647 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:47:21.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:47:21.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:47:21.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 6m40.027s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 6m40.009s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m35.982s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc00158ec00) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc00158ec00, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc00158ec00?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc00158ec00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc0030e54a0?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc00158e600) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc00158e600, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc00158e600, {0x0?, 0x100101c00?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc00158e600) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc000e425a0, {0x7f24250, 0xc0030e5260}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc000e425a0, {0x7f24250, 0xc0030e5260}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:47:53.642: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:47:53.645: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:47:53.646: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 7m0.029s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 7m0.011s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m55.984s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:647 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:48:01.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:48:01.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:48:01.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 7m20.031s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 7m20.013s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m15.986s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc0024c8e00) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc0024c8e00, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc0024c8e00?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc0024c8e00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc003672e70?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc0024c8d00) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc0024c8d00, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc0024c8d00, {0x100?, 0xc000600000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc0024c8d00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0032f45a0, {0x7f24250, 0xc003672cf0}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0032f45a0, {0x7f24250, 0xc003672cf0}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:48:33.641: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:48:33.645: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:48:33.645: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 7m40.033s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 7m40.015s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m35.988s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:647 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:48:41.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:48:41.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:48:41.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 8m0.034s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 8m0.016s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m55.99s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc00086a500) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc00086a500, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc00086a500?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc00086a500) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc0030e46f0?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc00086a400) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc00086a400, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc00086a400, {0x100?, 0xc000838000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc00086a400) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc000e42240, {0x7f24250, 0xc0030e4540}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc000e42240, {0x7f24250, 0xc0030e4540}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
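The chained builder at dns_common.go:472 (Name(pod.Name).Suffix(fileDir, fileName).Do(ctx).Raw()) is the tail of a GET against the pod's proxy subresource, which is why every retry surfaces as a full client-go REST round trip in these goroutine dumps. A sketch of such a request, reconstructed from the visible chain; the wrapper function readResultFile and its parameter names are assumptions, not the e2e source.

package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// readResultFile fetches /<fileDir>/<fileName> from the probe pod through
// the API server's pods/proxy subresource. Because the request transits the
// API server, a 503 from the API server fails the read even when the probe
// pod itself is healthy.
func readResultFile(ctx context.Context, c clientset.Interface, pod *v1.Pod, fileDir, fileName string) ([]byte, error) {
	return c.CoreV1().RESTClient().Get().
		Namespace(pod.Namespace).
		Resource("pods").
		SubResource("proxy").
		Name(pod.Name).
		Suffix(fileDir, fileName).
		Do(ctx).
		Raw()
}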
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:49:02.115: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:49:05.187: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:49:05.187: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:49:14.755: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 8m20.036s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 8m20.018s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m15.991s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc0017f7700) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc0017f7700, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc0017f7700?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc0017f7700) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc003673080?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc0017f7600) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc0017f7600, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc0017f7600, {0x100?, 0xc000600000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc0017f7600) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0029e2480, {0x7f24250, 0xc003672960}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0029e2480, {0x7f24250, 0xc003672960}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:49:17.827: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:49:17.827: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:49:21.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:49:21.703: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:49:21.703: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 8m40.037s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 8m40.02s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m35.993s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc00086ab00) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc00086ab00, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc00086ab00?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc00086ab00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc0030e4f60?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc00086aa00) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc00086aa00, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc00086aa00, {0x100?, 0xc000101c00?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc00086aa00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc000e42480, {0x7f24250, 0xc0030e4db0}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc000e42480, {0x7f24250, 0xc0030e4db0}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
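Each rejected read logs "the server is currently unable to handle the request", which is the message apimachinery synthesizes for a bare HTTP 503 Service Unavailable from the API server. Code that wants to classify that transient condition can use the standard error helper; the function below is illustrative, not taken from the test.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// retryableRead reports whether a probe-file read error is the transient
// 503 seen throughout this log, as opposed to some other failure.
func retryableRead(err error) bool {
	// Matches StatusReasonServiceUnavailable, i.e. exactly the
	// "the server is currently unable to handle the request" case.
	return apierrors.IsServiceUnavailable(err)
}

func main() {
	fmt.Println(retryableRead(nil)) // false: a nil error is not a 503
}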
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:49:53.641: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 9m0.039s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 9m0.021s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m55.995s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc00086ae00) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc00086ae00, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc00086ae00?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc00086ae00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc0030e51d0?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc00086ad00) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc00086ad00, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc00086ad00, {0x100?, 0xc000600000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc00086ad00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc000e425a0, {0x7f24250, 0xc0030e4db0}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc000e425a0, {0x7f24250, 0xc0030e4db0}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. 
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:49:56.709: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:49:56.709: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:50:01.699: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:05.795: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:05.795: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:50:08.867: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:11.940: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:11.940: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 9m20.04s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 9m20.023s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 9m15.996s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 
0xc000e3c000) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc000e3c000, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc000e3c000?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc000e3c000) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc003673bc0?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc0017f7f00) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc0017f7f00, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc0017f7f00, {0x100?, 0xc000600000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc0017f7f00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0029e27e0, {0x7f24250, 0xc003673a40}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0029e27e0, {0x7f24250, 0xc003673a40}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc002fd56c0?}, {0xc003657480?, 0x7f07c801c938?, 0xc002fd56c0?}, {0x7553820?, 0x2d?}, 0xc000704ee0?, {0x7f62698, 0xc00430c4e0}, ...) 
test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.validateTargetedProbeOutput({0x7f07c801c938, 0xc002fd56c0}, 0xc000ca91d0, 0xc002c0d200, {0xc003657480, 0x2, 0x2}, {0x7579f7a, 0x10}) test/e2e/network/dns_common.go:542 | // Try to find the expected value for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:50:16.711: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:19.779: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:19.779: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:50:23.640: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:23.644: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:23.644: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:50:31.719: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:31.722: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods 
dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:50:31.722: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 9m40.042s) test/e2e/network/dns.go:329 In [It] (Node Runtime: 9m40.024s) test/e2e/network/dns.go:329 At [By Step] looking for the results for each expected name from probers (Step Runtime: 9m35.997s) test/e2e/network/dns_common.go:541 Spec Goroutine goroutine 4530 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc00061ac00) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc00061ac00, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc00061ac00?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc00061ac00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc002322240?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0027a5aa0, 0xc00061a600) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc00061a600, {0x7ef2ae0, 0xc0027a5aa0}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc0032ac780, 0xc00061a600, {0x100?, 0xc000838000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc0032ac780, 0xc00061a600) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0029e2000, {0x7f24250, 0xc002322000}, 0xc003656e00?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0029e2000, {0x7f24250, 0xc002322000}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc002fd56c0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc002fd56c0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc002fd56c0}, 0xc0043bb110, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc002fd56c0}, 0x0?, 0x0?, 0x2?) 
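The names in the failure lines double as file names: the probe pod writes one result file per querier/protocol/target combination under /results, and the poller reads each one back by that exact name. A hypothetical reconstruction of the naming scheme as it appears in this log (wheezy_udp@<target>, jessie_udp@<target>); the scheme is inferred from the log lines, not copied from the test source.

package main

import "fmt"

// probeFileNames rebuilds the two file names the poller keeps asking for in
// this log: querier image name, "_", protocol, "@", lookup target.
func probeFileNames(target string) []string {
	return []string{
		fmt.Sprintf("wheezy_udp@%s", target),
		fmt.Sprintf("jessie_udp@%s", target),
	}
}

func main() {
	for _, f := range probeFileNames("dns-test-service-3.dns-4413.svc.cluster.local") {
		fmt.Println(f) // the two names every failed poll reports
	}
}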
Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 10m0.043s, Node Runtime: 10m0.025s, Step Runtime: 9m55.998s). Spec Goroutine: goroutine 4530 [select], same stack as above.

Mar 7 21:51:03.644: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local]
Mar 7 21:51:11.719: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local]

Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 10m20.045s, Node Runtime: 10m20.027s, Step Runtime: 10m16.001s). Spec Goroutine: goroutine 4530 [select], same stack as above.

Automatically polling progress: [sig-network] DNS should provide DNS for ExternalName services [Conformance] (Spec Runtime: 10m40.047s, Node Runtime: 10m40.029s, Step Runtime: 10m36.003s). Spec Goroutine: goroutine 4530 [select], same stack as above.
| ginkgo.By("looking for the results for each expected name from probers") > assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) | | framework.Logf("DNS probes using %s succeeded\n", pod.Name) > k8s.io/kubernetes/test/e2e/network.glob..func2.9({0x7f07c801c938, 0xc002fd56c0}) test/e2e/network/dns.go:352 | pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) | > validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") | | // Test changing the externalName field k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc002fd56c0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:51:43.640: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:51:43.644: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:51:43.644: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:51:43.648: INFO: Unable to read wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:51:43.651: INFO: Unable to read jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local from pod dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: the server is currently unable to handle the request (get pods dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f) Mar 7 21:51:43.651: INFO: Lookups using dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f failed for: [wheezy_udp@dns-test-service-3.dns-4413.svc.cluster.local jessie_udp@dns-test-service-3.dns-4413.svc.cluster.local] Mar 7 21:51:43.651: INFO: Unexpected error: <*errors.errorString | 0xaaf3910>: { s: "timed out waiting for the condition", } [FAILED] timed out waiting for the condition In [It] at: test/e2e/network/dns_common.go:459 @ 03/07/23 21:51:43.651 < Exit [It] should provide DNS for ExternalName services [Conformance] - test/e2e/network/dns.go:329 @ 03/07/23 21:51:43.651 (10m47.243s) > Enter [AfterEach] [sig-network] DNS - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:51:43.651 Mar 7 21:51:43.651: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-network] DNS - test/e2e/framework/node/init/init.go:33 @ 03/07/23 21:51:43.654 (3ms) > Enter [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns_common.go:525 @ 03/07/23 21:51:43.654 STEP: deleting the pod - test/e2e/network/dns_common.go:526 @ 03/07/23 21:51:43.654 < Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns_common.go:525 @ 03/07/23 21:51:43.664 (10ms) > 
Enter [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns.go:337 @ 03/07/23 21:51:43.664 STEP: deleting the test externalName service - test/e2e/network/dns.go:338 @ 03/07/23 21:51:43.665 < Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns.go:337 @ 03/07/23 21:51:43.67 (6ms) > Enter [DeferCleanup (Each)] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:51:43.67 < Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:35 @ 03/07/23 21:51:43.67 (0s) > Enter [DeferCleanup (Each)] [sig-network] DNS - dump namespaces | framework.go:209 @ 03/07/23 21:51:43.67 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:51:43.67 STEP: Collecting events from namespace "dns-4413". - test/e2e/framework/debug/dump.go:42 @ 03/07/23 21:51:43.67 STEP: Found 15 events. - test/e2e/framework/debug/dump.go:46 @ 03/07/23 21:51:43.673 Mar 7 21:51:43.673: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: { } Scheduled: Successfully assigned dns-4413/dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f to 172.17.0.1 Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:58 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:58 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Created: Created container webserver Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:58 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Started: Started container webserver Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:58 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:58 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Created: Created container querier Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:58 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Started: Started container querier Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:58 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7" already present on machine Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:58 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Created: Created container jessie-querier Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:58 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Started: Started container jessie-querier Mar 7 21:51:43.673: INFO: At 2023-03-07 21:40:59 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Mar 7 21:51:43.673: INFO: At 2023-03-07 21:41:01 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} Failed: Error: failed to start containerd task "jessie-querier": cannot start a stopped process: unknown Mar 7 21:51:43.673: INFO: At 2023-03-07 21:41:03 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container webserver in pod dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f_dns-4413(451a85bf-cfdd-4a65-817d-828be767abf2) Mar 7 21:51:43.673: INFO: At 2023-03-07 21:41:03 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container querier in pod dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f_dns-4413(451a85bf-cfdd-4a65-817d-828be767abf2) Mar 7 21:51:43.673: INFO: At 2023-03-07 21:41:03 +0000 UTC - event for dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container jessie-querier in pod dns-test-ca350306-f3a8-484c-bd30-22da8ebf279f_dns-4413(451a85bf-cfdd-4a65-817d-828be767abf2) Mar 7 21:51:43.682: INFO: POD NODE PHASE GRACE CONDITIONS Mar 7 21:51:43.682: INFO: Mar 7 21:51:43.687: INFO: Logging node info for node 172.17.0.1 Mar 7 21:51:43.694: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 193f4c27-48e4-45d1-9ab8-e7363f1f27fa 6875 0 2023-03-07 21:00:12 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-07 21:00:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-07 21:48:31 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-07 21:48:31 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory 
available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-07 21:48:31 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-07 21:48:31 +0000 UTC,LastTransitionTime:2023-03-07 21:00:12 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-07 21:48:31 +0000 UTC,LastTransitionTime:2023-03-07 21:00:13 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:0dd6f17b-5643-5161-a044-7635f62c1e8a,BootID:39f45946-8088-4927-a7aa-7edfae5d5b44,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,KubeProxyVersion:v1.27.0-alpha.3.190+dcc34e0a318fce,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 7 21:51:43.694: INFO: Logging kubelet events for node 172.17.0.1 Mar 7 21:51:43.698: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 7 21:51:43.715: INFO: coredns-55fddfc79-6ldw8 started at 2023-03-07 21:00:16 +0000 UTC (0+1 container statuses recorded) Mar 7 21:51:43.715: INFO: Container coredns ready: false, restart count 15 Mar 7 21:51:43.744: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/07/23 21:51:43.744 (74ms) < Exit [DeferCleanup (Each)] [sig-network] DNS - dump namespaces | framework.go:209 @ 03/07/23 21:51:43.744 (74ms) > Enter [DeferCleanup (Each)] [sig-network] DNS - tear down framework | framework.go:206 @ 03/07/23 21:51:43.744 STEP: Destroying namespace "dns-4413" for this suite. 
- test/e2e/framework/framework.go:351 @ 03/07/23 21:51:43.744 < Exit [DeferCleanup (Each)] [sig-network] DNS - tear down framework | framework.go:206 @ 03/07/23 21:51:43.75 (6ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:51:43.75 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/07/23 21:51:43.75 (0s)
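Both DNS failures in this run surface as the same generic error, "timed out waiting for the condition", because the assertion at test/e2e/network/dns_common.go:459 wraps its result-file checks in wait.PollImmediateWithContext with a 5s interval and a 600s budget, as the goroutine dumps above show; every individual read fails with "the server is currently unable to handle the request" while the prober pod's containers crash-loop after the containerd "cannot start a stopped process" error. The sketch below is a minimal, self-contained approximation of that loop — not the framework's exact code — with readResultFile standing in (hypothetically) for the client-go proxy GET of /results/<name> from the pod:

```go
// Minimal sketch (not the framework's exact code) of the polling loop at
// test/e2e/network/dns_common.go:459. The real test passes time.Second*5
// and time.Second*600; shorter values are used here so the sketch
// finishes quickly.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// readResultFile is a hypothetical stand-in for the client-go proxy GET
// of /results/<name> from the prober pod; in this run that request kept
// failing because the pod's containers were crash-looping.
func readResultFile(ctx context.Context, name string) ([]byte, error) {
	return nil, errors.New("the server is currently unable to handle the request")
}

func assertFilesExist(ctx context.Context, names []string, interval, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, interval, timeout,
		func(ctx context.Context) (bool, error) {
			var failed []string
			for _, n := range names {
				if _, err := readResultFile(ctx, n); err != nil {
					failed = append(failed, n)
				}
			}
			// (false, nil) means "keep polling"; when the budget runs out
			// the caller sees the generic "timed out waiting for the
			// condition" rather than the per-file errors logged above.
			return len(failed) == 0, nil
		})
}

func main() {
	err := assertFilesExist(context.Background(),
		[]string{"wheezy_udp@foo.example.com", "jessie_udp@foo.example.com"},
		10*time.Millisecond, 50*time.Millisecond)
	fmt.Println(err) // timed out waiting for the condition
}
```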
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sDNS\sshould\sprovide\sDNS\sfor\sthe\scluster\s\s\[Conformance\]$'
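To reproduce a single conformance failure locally, note that the --ginkgo.focus value above is an anchored regular expression matched against the spec's full text (the escaped spaces and brackets are literal, including the double space before [Conformance]). A standalone sanity check of the pattern before committing to a long run — illustrative only, not part of the suite:

```go
// Standalone check showing how the --ginkgo.focus flag above is
// interpreted: an anchored regexp matched against the spec's full text.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	focus := regexp.MustCompile(`Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sDNS\sshould\sprovide\sDNS\sfor\sthe\scluster\s\s\[Conformance\]$`)
	spec := "Kubernetes e2e suite [It] [sig-network] DNS should provide DNS for the cluster  [Conformance]"
	fmt.Println(focus.MatchString(spec)) // true
}
```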
[FAILED] timed out waiting for the condition In [It] at: test/e2e/network/dns_common.go:459 @ 03/07/23 21:38:06.512
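The STEP lines in the log below show the shell loops the wheezy and jessie prober containers run: once a second, dig the name over UDP and over TCP, and drop an OK marker file under /results whenever the answer section is non-empty; the test then reads those marker files back through the API server, which is the read that keeps failing here. A rough Go equivalent of one such probe loop (illustrative only; the real containers run the shell shown in the log):

```go
// Rough Go equivalent of the wheezy/jessie UDP probe loop shown in the
// STEP lines below; the real prober containers run the shell verbatim.
package main

import (
	"os"
	"os/exec"
	"strings"
	"time"
)

func main() {
	const name = "kubernetes.default.svc.cluster.local"
	for i := 0; i < 600; i++ {
		// +noall +answer prints only the answer section, so empty output
		// means the lookup did not resolve.
		out, err := exec.Command("dig", "+notcp", "+noall", "+answer", "+search", name, "A").Output()
		if err == nil && strings.TrimSpace(string(out)) != "" {
			// Marker file the e2e test polls for via the API server proxy.
			os.WriteFile("/results/wheezy_udp@"+name, []byte("OK\n"), 0o644)
		}
		time.Sleep(time.Second)
	}
}
```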
> Enter [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/07/23 21:27:40.218 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/07/23 21:27:40.218 Mar 7 21:27:40.218: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename dns - test/e2e/framework/framework.go:250 @ 03/07/23 21:27:40.219 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/07/23 21:27:40.23 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/07/23 21:27:40.234 < Exit [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/07/23 21:27:40.238 (20ms) > Enter [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:27:40.238 < Exit [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/07/23 21:27:40.238 (0s) > Enter [It] should provide DNS for the cluster [Conformance] - test/e2e/network/dns.go:50 @ 03/07/23 21:27:40.238 STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;sleep 1; done - test/e2e/network/dns.go:60 @ 03/07/23 21:27:40.238 STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;sleep 1; done - test/e2e/network/dns.go:61 @ 03/07/23 21:27:40.238 STEP: creating a pod to probe DNS - test/e2e/network/dns.go:64 @ 03/07/23 21:27:40.238 STEP: submitting the pod to kubernetes - test/e2e/network/dns_common.go:496 @ 03/07/23 21:27:40.238 STEP: retrieving the pod - test/e2e/network/dns_common.go:508 @ 03/07/23 21:27:52.274 STEP: looking for the results for each expected name from probers - test/e2e/network/dns_common.go:514 @ 03/07/23 21:27:52.278 Mar 7 21:27:52.281: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:27:52.284: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:27:52.287: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:27:52.290: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:27:52.290: INFO: Lookups using 
dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:28:03.399: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:28:06.467: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:28:09.539: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:28:19.747: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:28:19.747: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:28:25.351: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:28:28.419: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:28:38.691: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:28:38.695: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:28:38.695: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:29:12.296: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:12.299: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:12.304: INFO: Unable to read 
jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:12.310: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:12.310: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:29:27.591: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:27.596: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:27.599: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:27.602: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:27.602: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:29:32.296: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:32.299: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:32.302: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:32.305: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:32.305: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:29:40.355: INFO: Unable to read 
wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:40.359: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:40.362: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:40.365: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:29:40.365: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:30:12.295: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:30:15.363: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:30:21.475: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:30:25.575: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:30:25.575: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:30:33.411: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:30:36.483: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:30:39.555: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:30:49.763: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local 
from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:30:49.763: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:30:55.364: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:30:58.435: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:01.507: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:01.511: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:01.511: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:31:32.296: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:35.363: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:38.435: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:41.507: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:41.507: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:31:42.296: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:42.299: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod 
dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:42.302: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:42.305: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:42.305: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:31:50.371: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:50.374: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:50.377: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:50.380: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:31:50.380: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:32:22.296: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:32:25.379: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:32:25.383: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:32:25.386: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:32:25.386: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: 
[wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 5m0.02s) test/e2e/network/dns.go:50 In [It] (Node Runtime: 5m0s) test/e2e/network/dns.go:50 At [By Step] looking for the results for each expected name from probers (Step Runtime: 4m47.961s) test/e2e/network/dns_common.go:514 Spec Goroutine goroutine 3434 [select, 2 minutes] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc002d2da00) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc002d2da00, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc002d2da00?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc002d2da00) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc0039db7a0?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc001197e00, 0xc002d2d900) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc002d2d900, {0x7ef2ae0, 0xc001197e00}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc003936120, 0xc002d2d900, {0x100?, 0xc0000bb000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc003936120, 0xc002d2d900) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0039f8900, {0x7f24250, 0xc0039db620}, 0xc003d983f0?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0039f8900, {0x7f24250, 0xc0039db620}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc003c06b40?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc003c06b40?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc003c06b40}, 0xc0032f5200, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc003c06b40}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc003c06b40}, 0x0?, 0x0?, 0x0?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc003c06b40?}, {0xc003c06b80?, 0x7f07c801c938?, 0xc003c06b40?}, {0x7553820?, 0x2d?}, 0xc00200d340?, {0x7f62698, 0xc003e8b1e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.assertFilesExist(...) test/e2e/network/dns_common.go:453 | | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) { > assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "") | } | > k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f07c801c938, 0xc003c06b40}, 0xc000ca91d0, 0xc003946d80, {0xc003c06b80, 0x4, 0x4}) test/e2e/network/dns_common.go:515 | // Try to find results for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet) | | // TODO: probe from the host, too. > k8s.io/kubernetes/test/e2e/network.glob..func2.1({0x7f07c801c938, 0xc003c06b40}) test/e2e/network/dns.go:66 | ginkgo.By("creating a pod to probe DNS") | pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) > validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc003c06b40}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:32:57.296: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 5m20.022s) test/e2e/network/dns.go:50 In [It] (Node Runtime: 5m20.002s) test/e2e/network/dns.go:50 At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m7.963s) test/e2e/network/dns_common.go:514 Spec Goroutine goroutine 3434 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc003c82200) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc003c82200, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc003c82200?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc003c82200) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc003bb4450?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc001197e00, 0xc003c82100) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc003c82100, {0x7ef2ae0, 0xc001197e00}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc003936120, 0xc003c82100, {0x100?, 0xc000101c00?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc003936120, 0xc003c82100) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0000fe000, {0x7f24250, 0xc0039db620}, 0xc003d983f0?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0000fe000, {0x7f24250, 0xc0039db620}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc003c06b40?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc003c06b40?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc003c06b40}, 0xc0032f5200, 0x2f83f2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc003c06b40}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc003c06b40}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc003c06b40?}, {0xc003c06b80?, 0x7f07c801c938?, 0xc003c06b40?}, {0x7553820?, 0x2d?}, 0xc00200d340?, {0x7f62698, 0xc003e8b1e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.assertFilesExist(...) test/e2e/network/dns_common.go:453 | | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) { > assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "") | } | > k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f07c801c938, 0xc003c06b40}, 0xc000ca91d0, 0xc003946d80, {0xc003c06b80, 0x4, 0x4}) test/e2e/network/dns_common.go:515 | // Try to find results for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet) | | // TODO: probe from the host, too. 
> k8s.io/kubernetes/test/e2e/network.glob..func2.1({0x7f07c801c938, 0xc003c06b40}) test/e2e/network/dns.go:66 | ginkgo.By("creating a pod to probe DNS") | pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) > validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc003c06b40}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 7 21:33:00.355: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:03.427: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:03.430: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:03.430: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:33:07.295: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:07.298: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:07.302: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:07.305: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:07.305: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Mar 7 21:33:15.363: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:15.367: INFO: Unable to read 
wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server could not find the requested resource (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:15.370: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server could not find the requested resource (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:15.373: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server could not find the requested resource (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136) Mar 7 21:33:15.373: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local] Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 5m40.024s) test/e2e/network/dns.go:50 In [It] (Node Runtime: 5m40.004s) test/e2e/network/dns.go:50 At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m27.964s) test/e2e/network/dns_common.go:514 Spec Goroutine goroutine 3434 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc003d28200) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc003d28200, {0xa0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc003d28200?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc003d28200) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x6f8e780?, 0xc003b7a6c0?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc001197e00, 0xc003d28100) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc003d28100, {0x7ef2ae0, 0xc001197e00}, {0x8?, 0x746c600?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc003936120, 0xc003d28100, {0x100?, 0xc0000bb000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc003936120, 0xc003d28100) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.1.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0027dec60, {0x7f24250, 0xc003b7a540}, 0xc003d983f0?) 
  vendor/k8s.io/client-go/rest/request.go:999
k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0027dec60, {0x7f24250, 0xc003b7a540})
  vendor/k8s.io/client-go/rest/request.go:1039
> k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f07c801c938?, 0xc003c06b40?})
  test/e2e/network/dns_common.go:472
  | Name(pod.Name).
  | Suffix(fileDir, fileName).
  > Do(ctx).Raw()
  |
  | if err != nil {
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f07c801c938?, 0xc003c06b40?}, 0x23?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc003c06b40}, 0xc0032f5200, 0x2f83f2a?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc003c06b40}, 0x0?, 0x0?, 0x2?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc003c06b40}, 0x0?, 0x0?, 0x0?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534
> k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc003c06b40?}, {0xc003c06b80?, 0x7f07c801c938?, 0xc003c06b40?}, {0x7553820?, 0x2d?}, 0xc00200d340?, {0x7f62698, 0xc003e8b1e0}, ...)
  test/e2e/network/dns_common.go:459
  | var failed []string
  |
  > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) {
  | failed = []string{}
  |
> k8s.io/kubernetes/test/e2e/network.assertFilesExist(...)
  test/e2e/network/dns_common.go:453
  |
  | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
  > assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "")
  | }
  |
> k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f07c801c938, 0xc003c06b40}, 0xc000ca91d0, 0xc003946d80, {0xc003c06b80, 0x4, 0x4})
  test/e2e/network/dns_common.go:515
  | // Try to find results for each expected name.
  | ginkgo.By("looking for the results for each expected name from probers")
  > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet)
  |
  | // TODO: probe from the host, too.
> k8s.io/kubernetes/test/e2e/network.glob..func2.1({0x7f07c801c938, 0xc003c06b40})
  test/e2e/network/dns.go:66
  | ginkgo.By("creating a pod to probe DNS")
  | pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
  > validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...))
  | })
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc003c06b40})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 6m0.026s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 6m0.006s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m47.966s)
      test/e2e/network/dns_common.go:514
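The goroutine dump above shows where the spec is spending its time: every attempt is an HTTP GET through the apiserver's pod proxy subresource (the assertFilesContain.func1 frame at test/e2e/network/dns_common.go:472), blocked inside an HTTP/2 round trip. A minimal sketch of that read, reconstructed from the excerpted frames (the helper name readProbeResult is illustrative, not the e2e code):

package e2esketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// readProbeResult fetches one result file from the probe pod by proxying
// through the apiserver: GET /api/v1/namespaces/<ns>/pods/<name>/proxy/<dir>/<file>.
// This is the request the dump shows parked in rest.(*Request).Do.
func readProbeResult(ctx context.Context, c kubernetes.Interface, pod *v1.Pod, fileDir, fileName string) ([]byte, error) {
	return c.CoreV1().RESTClient().Get().
		Namespace(pod.Namespace).
		Resource("pods").
		SubResource("proxy").
		Name(pod.Name).
		Suffix(fileDir, fileName).
		Do(ctx).Raw()
}

A 503 on this path ("the server is currently unable to handle the request") is typically the apiserver reporting that it could not reach the proxied pod, so the repeated failures below implicate pod networking or the probe pod itself rather than the DNS lookups as such.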
Mar 7 21:33:47.295: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:33:47.299: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:33:47.302: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:33:47.305: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:33:47.305: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Mar 7 21:33:52.296: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:33:52.299: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:33:52.303: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:33:52.306: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:33:52.306: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 6m20.028s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 6m20.008s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m7.969s)
      test/e2e/network/dns_common.go:514
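For context, the It body excerpted in the dumps (test/e2e/network/dns.go:66) builds shell probe commands for the two test images and a pod that records one marker file per successful lookup under /results; the poll shown above then reads those files back through the proxy. Roughly what one generated probe loop could look like, as an illustrative sketch only (the real generator also handles search paths, PTR records, and per-image tool differences):

package e2esketch

import "fmt"

// probeLoop sketches a shell loop that resolves name once a second and, on
// success, writes a marker file for the test to poll via the pod proxy.
// Illustrative only; flags and file naming approximate the e2e generator.
func probeLoop(name string, useTCP bool) string {
	proto, flag := "udp", "+notcp"
	if useTCP {
		proto, flag = "tcp", "+tcp"
	}
	file := fmt.Sprintf("/results/wheezy_%s@%s", proto, name)
	return fmt.Sprintf(
		`while true; do test -n "$(dig %s +noall +answer +search %s A)" && echo OK > %q; sleep 1; done`,
		flag, name, file)
}

Under these assumptions, probeLoop("kubernetes.default.svc.cluster.local", false) would produce the wheezy_udp@kubernetes.default.svc.cluster.local marker file whose absence is reported throughout this log.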
Mar 7 21:34:00.356: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:03.427: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:03.431: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:03.434: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:03.434: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 6m40.031s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 6m40.011s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m27.971s)
      test/e2e/network/dns_common.go:514
Mar 7 21:34:25.831: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:25.835: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:25.839: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:36.067: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:36.067: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Mar 7 21:34:37.295: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:37.299: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:37.302: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:37.305: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:37.305: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 7m0.033s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 7m0.012s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m47.973s)
      test/e2e/network/dns_common.go:514
Spec Goroutine
goroutine 3434 [select]
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f07c801c938, 0xc003c06b40}, 0xc0032f5200, 0x2f83f2a?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:647
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f07c801c938, 0xc003c06b40}, 0x0?, 0x0?, 0x2?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f07c801c938, 0xc003c06b40}, 0x0?, 0x0?, 0x0?)
  vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:534
> k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f07c801c938?, 0xc003c06b40?}, {0xc003c06b80?, 0x7f07c801c938?, 0xc003c06b40?}, {0x7553820?, 0x2d?}, 0xc00200d340?, {0x7f62698, 0xc003e8b1e0}, ...)
  test/e2e/network/dns_common.go:459
  | var failed []string
  |
  > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) {
  | failed = []string{}
  |
> k8s.io/kubernetes/test/e2e/network.assertFilesExist(...)
  test/e2e/network/dns_common.go:453
  |
  | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
  > assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "")
  | }
  |
> k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f07c801c938, 0xc003c06b40}, 0xc000ca91d0, 0xc003946d80, {0xc003c06b80, 0x4, 0x4})
  test/e2e/network/dns_common.go:515
  | // Try to find results for each expected name.
  | ginkgo.By("looking for the results for each expected name from probers")
  > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet)
  |
  | // TODO: probe from the host, too.
> k8s.io/kubernetes/test/e2e/network.glob..func2.1({0x7f07c801c938, 0xc003c06b40})
  test/e2e/network/dns.go:66
  | ginkgo.By("creating a pod to probe DNS")
  | pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
  > validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...))
  | })
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x7f2c728?, 0xc003c06b40})
  vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
  vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841
Mar 7 21:34:45.347: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:45.351: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:45.355: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:45.358: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:34:45.358: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 7m20.035s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 7m20.014s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m7.975s)
      test/e2e/network/dns_common.go:514
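The 7m0.033s dump above is the interesting variant: the goroutine is parked in the select inside wait.poll between attempts rather than in an HTTP round trip. The loop it is executing is the one excerpted at test/e2e/network/dns_common.go:459. A self-contained sketch of that pattern, with waitForFiles and the read callback as illustrative names:

package e2esketch

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForFiles polls every 5s, for up to 10m, until read succeeds for every
// expected file name, mirroring the ExpectNoError(wait.PollImmediateWithContext(...))
// call in the excerpt. Per-file failures are collected and retried, never
// returned, so only the 10-minute budget can end the loop unsuccessfully.
func waitForFiles(ctx context.Context, names []string, read func(context.Context, string) ([]byte, error)) error {
	var failed []string
	err := wait.PollImmediateWithContext(ctx, 5*time.Second, 600*time.Second, func(ctx context.Context) (bool, error) {
		failed = failed[:0]
		for _, n := range names {
			if _, err := read(ctx, n); err != nil {
				failed = append(failed, n) // retry this name on the next tick
			}
		}
		return len(failed) == 0, nil
	})
	if err != nil {
		return fmt.Errorf("lookups failed for %v: %w", failed, err)
	}
	return nil
}

This structure explains the cadence of the log: one burst of "Unable to read ..." lines per five-second tick, and a hard test failure only once the ten-minute budget is exhausted.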
Mar 7 21:35:17.296: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:35:17.299: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:35:17.303: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:35:17.306: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:35:17.306: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 7m40.036s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 7m40.016s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m27.977s)
      test/e2e/network/dns_common.go:514
Mar 7 21:35:25.347: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:35:25.351: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:35:25.355: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:35:25.358: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:35:25.358: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 8m0.038s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 8m0.018s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m47.978s)
      test/e2e/network/dns_common.go:514
Mar 7 21:35:57.297: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 8m20.04s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 8m20.02s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m7.98s)
      test/e2e/network/dns_common.go:514
Mar 7 21:36:00.355: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:36:03.427: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:36:06.499: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:36:06.499: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Mar 7 21:36:10.371: INFO: Unable to read wheezy_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:36:10.375: INFO: Unable to read wheezy_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:36:10.378: INFO: Unable to read jessie_udp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:36:10.381: INFO: Unable to read jessie_tcp@kubernetes.default.svc.cluster.local from pod dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136: the server is currently unable to handle the request (get pods dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136)
Mar 7 21:36:10.381: INFO: Lookups using dns-6322/dns-test-66452c5c-52a0-4644-9f35-86d7c19aa136 failed for: [wheezy_udp@kubernetes.default.svc.cluster.local wheezy_tcp@kubernetes.default.svc.cluster.local jessie_udp@kubernetes.default.svc.cluster.local jessie_tcp@kubernetes.default.svc.cluster.local]
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 8m40.042s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 8m40.022s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m27.982s)
      test/e2e/network/dns_common.go:514
Automatically polling progress: [sig-network] DNS should provide DNS for the cluster [Conformance] (Spec Runtime: 9m0.043s)
  test/e2e/network/dns.go:50
  In [It] (Node Runtime: 9m0.023s)
    test/e2e/network/dns.go:50
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m47.984s)
      test/e2e/network/dns_common.go:514
Spec Goroutine
goroutine 3434 [select, 2 minutes]
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000f48600, 0xc002d2da00)
  vendor/golang.org/x/net/http2/transport.go:1269
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc0017fb8c0, 0xc002d2da00, {0xa0?})
  vendor/golang.org/x/net/http2/transport.go:561
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...)
  vendor/golang.org/x/net/http2/transport.go:513
k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0020a03c0?}, 0xc002d2da00?)
  vendor/golang.org/x/net/http2/transport.go:3085
net/http.(*Transport).roundTrip(0xc0020a03c0, 0xc002d2da00)
  /go/src/k8s.io/kubernetes/_ou
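Two distinct errors appear in this window: "the server is currently unable to handle the request" (a 503 from the apiserver proxy, generally meaning the probe pod could not be dialed) for almost every read, and a brief run of "the server could not find the requested resource" (404) at 21:33:15. Both generally come back from client-go as status errors that the standard helpers can classify; a small sketch, with retryable as an illustrative name:

package e2esketch

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// retryable reports whether a proxy-read failure matches the two transient
// modes seen in this log: 503 (the proxy could not reach the pod) and 404
// (the marker file does not exist yet). The e2e poll treats both the same
// way: record the name as failed and try again on the next tick.
func retryable(err error) bool {
	return apierrors.IsServiceUnavailable(err) || apierrors.IsNotFound(err)
}

Note that the final goroutine dump above is annotated "[select, 2 minutes]": by the 9-minute mark the same HTTP/2 round trip has been blocked for two minutes straight, consistent with the proxy path to the probe pod being unreachable rather than individual lookups being slow.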