PR | rata: hack/local-up-cluster.sh: Remove old dynamic certs
Result | FAILURE
Tests | 17 failed / 88 succeeded
Started |
Elapsed | 2h1m
Revision |
Builder | b34a6e49-c253-11ed-b381-5a96e7d210c2
Refs | master:c8f001d7 116385:f1a51265
infra-commit | 631534302 |
job-version | v1.27.0-alpha.3.554+bdf18032e96dd3 |
kubetest-version | v20230222-b5208facd4 |
repo | k8s.io/kubernetes |
repo-commit | bdf18032e96dd3e727a4ad31353374ac2349100b |
repos | {u'k8s.io/kubernetes': u'master:c8f001d798eb81a92e61e467fca74ca180de6fc6,116385:f1a512657f2d3ca53dd9629d7977c12f561fa3b9'} |
revision | v1.27.0-alpha.3.554+bdf18032e96dd3 |
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sCronJob\sshould\sreplace\sjobs\swhen\sReplaceConcurrent\s\[Conformance\]$'
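For context on what this conformance test exercises: with concurrencyPolicy: Replace, the cronjob controller is expected to delete a job that is still running when the next schedule fires and create a fresh one in its place, so the namespace should never hold more than one job for long. Below is a minimal sketch of such a CronJob using the standard k8s.io/api types; the helper name and schedule are assumptions, while the container spec mirrors the pod dumped in the failure that follows.

package main

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// replaceCronJob sketches the shape of CronJob this test drives (assumed
// helper, not the actual e2e fixture): a one-minute schedule whose pods
// sleep longer than the schedule interval, so the Replace policy has to
// act on every tick.
func replaceCronJob(ns string) *batchv1.CronJob {
	return &batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "replace", Namespace: ns},
		Spec: batchv1.CronJobSpec{
			Schedule:          "*/1 * * * *",             // assumed: fire every minute
			ConcurrencyPolicy: batchv1.ReplaceConcurrent, // replace the running job
			JobTemplate: batchv1.JobTemplateSpec{
				Spec: batchv1.JobSpec{
					Template: corev1.PodTemplateSpec{
						Spec: corev1.PodSpec{
							RestartPolicy: corev1.RestartPolicyOnFailure,
							Containers: []corev1.Container{{
								// Matches the container in the dump below.
								Name:    "c",
								Image:   "registry.k8s.io/e2e-test-images/busybox:1.29-4",
								Command: []string{"sleep", "300"},
							}},
						},
					},
				},
			},
		},
	}
}

func main() { _ = replaceCronJob("cronjob-344") }

The `sleep 300` container deliberately outlives the one-minute schedule, which is what forces the Replace policy to kick in on every tick.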
[FAILED] Failed to replace CronJob replace-27979878 in namespace cronjob-344: more than one job is running [{TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27979878 GenerateName: Namespace:cronjob-344 SelfLink: UID:1087b015-81a6-4b55-871b-c8814912c3e0 ResourceVersion:2188 Generation:1 CreationTimestamp:2023-03-14 11:18:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:1087b015-81a6-4b55-871b-c8814912c3e0 job-name:replace-27979878] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:21793a1c-3353-434f-bd67-6d0cec618bca Controller:0xc000d2cd40 BlockOwnerDeletion:0xc000d2cd41}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:18:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"21793a1c-3353-434f-bd67-6d0cec618bca\"}":{}}},"f:spec":{"f:backoffLimit":{},"f:completionMode":{},"f:completions":{},"f:parallelism":{},"f:suspend":{},"f:template":{"f:spec":{"f:containers":{"k:{\"name\":\"c\"}":{".":{},"f:command":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{},"f:volumeMounts":{".":{},"k:{\"mountPath\":\"/data\"}":{".":{},"f:mountPath":{},"f:name":{}}}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:volumes":{".":{},"k:{\"name\":\"data\"}":{".":{},"f:emptyDir":{},"f:name":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:18:07 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:status":{"f:conditions":{},"f:failed":{},"f:ready":{},"f:startTime":{},"f:uncountedTerminatedPods":{}}} Subresource:status}]} Spec:{Parallelism:0xc000d2cd78 Completions:0xc000d2cd7c ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc000d2ce08 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: 1087b015-81a6-4b55-871b-c8814912c3e0,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:1087b015-81a6-4b55-871b-c8814912c3e0 job-name:replace-27979878] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil TerminationMessagePath:/dev/termination-log TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent 
SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc000d2ce00 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0012c4650 Suspend:0xc000d2ce2a} Status:{Conditions:[{Type:Failed Status:True LastProbeTime:2023-03-14 11:18:07 +0000 UTC LastTransitionTime:2023-03-14 11:18:07 +0000 UTC Reason:BackoffLimitExceeded Message:Job has reached the specified backoff limit}] StartTime:2023-03-14 11:18:00 +0000 UTC CompletionTime:<nil> Active:0 Succeeded:0 Failed:1 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc000d2ce3c}} {TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27979879 GenerateName: Namespace:cronjob-344 SelfLink: UID:d9f38524-cfae-4a7b-9b9a-a6956baa3e6e ResourceVersion:2268 Generation:1 CreationTimestamp:2023-03-14 11:19:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:d9f38524-cfae-4a7b-9b9a-a6956baa3e6e job-name:replace-27979879] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:21793a1c-3353-434f-bd67-6d0cec618bca Controller:0xc000d2cea0 BlockOwnerDeletion:0xc000d2cea1}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:19:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"21793a1c-3353-434f-bd67-6d0cec618bca\"}":{}}},"f:spec":{"f:backoffLimit":{},"f:completionMode":{},"f:completions":{},"f:parallelism":{},"f:suspend":{},"f:template":{"f:spec":{"f:containers":{"k:{\"name\":\"c\"}":{".":{},"f:command":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{},"f:volumeMounts":{".":{},"k:{\"mountPath\":\"/data\"}":{".":{},"f:mountPath":{},"f:name":{}}}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:volumes":{".":{},"k:{\"name\":\"data\"}":{".":{},"f:emptyDir":{},"f:name":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:19:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:status":{"f:active":{},"f:ready":{},"f:startTime":{},"f:uncountedTerminatedPods":{}}} Subresource:status}]} Spec:{Parallelism:0xc000d2ced8 Completions:0xc000d2cedc ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc000d2cf68 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: d9f38524-cfae-4a7b-9b9a-a6956baa3e6e,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> 
Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:d9f38524-cfae-4a7b-9b9a-a6956baa3e6e job-name:replace-27979879] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil TerminationMessagePath:/dev/termination-log TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc000d2cf60 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0012c4680 Suspend:0xc000d2cf8a} Status:{Conditions:[] StartTime:2023-03-14 11:19:00 +0000 UTC CompletionTime:<nil> Active:1 Succeeded:0 Failed:0 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc000d2cf8c}}] In [It] at: test/e2e/apps/cronjob.go:185 @ 03/14/23 11:19:01.329
from junit_01.xml
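The error above is produced by the suite's replacement poll at test/e2e/apps/cronjob.go:185. Below is a minimal sketch of that kind of check, with assumed function and timeout names rather than the real e2e helper: list the jobs in the namespace and fail fast if more than one exists, since under Replace the previous job should already be gone. In this run, replace-27979878 had failed on its own (BackoffLimitExceeded after its pod sandbox was killed and re-created, per the events further down), so it was still listed alongside replace-27979879.

package e2echeck

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForJobReplaced (assumed name) polls until the job named prev has been
// replaced by a differently named job; it errors out immediately if two
// jobs coexist, which is the branch that fired in this run.
func waitForJobReplaced(ctx context.Context, c kubernetes.Interface, ns, prev string) error {
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
			if err != nil {
				return false, err
			}
			if len(jobs.Items) > 1 {
				// Old job still present next to its replacement.
				return false, fmt.Errorf("more than one job is running %v", jobs.Items)
			}
			if len(jobs.Items) == 0 {
				// Old job deleted, replacement not created yet; keep polling.
				return false, nil
			}
			return jobs.Items[0].Name != prev, nil
		})
}

The full spec log, including the namespace events that show the first job's pod crashing, follows.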
> Enter [BeforeEach] [sig-apps] CronJob - set up framework | framework.go:191 @ 03/14/23 11:17:13.275 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:17:13.275 Mar 14 11:17:13.275: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename cronjob - test/e2e/framework/framework.go:250 @ 03/14/23 11:17:13.278 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:17:13.296 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:17:13.301 < Exit [BeforeEach] [sig-apps] CronJob - set up framework | framework.go:191 @ 03/14/23 11:17:13.306 (32ms) > Enter [BeforeEach] [sig-apps] CronJob - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:17:13.306 < Exit [BeforeEach] [sig-apps] CronJob - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:17:13.307 (0s) > Enter [It] should replace jobs when ReplaceConcurrent [Conformance] - test/e2e/apps/cronjob.go:161 @ 03/14/23 11:17:13.307 STEP: Creating a ReplaceConcurrent cronjob - test/e2e/apps/cronjob.go:162 @ 03/14/23 11:17:13.307 STEP: Ensuring a job is scheduled - test/e2e/apps/cronjob.go:168 @ 03/14/23 11:17:13.313 STEP: Ensuring exactly one is scheduled - test/e2e/apps/cronjob.go:172 @ 03/14/23 11:18:01.318 STEP: Ensuring exactly one running job exists by listing jobs explicitly - test/e2e/apps/cronjob.go:177 @ 03/14/23 11:18:01.321 STEP: Ensuring the job is replaced with a new one - test/e2e/apps/cronjob.go:183 @ 03/14/23 11:18:01.324 Mar 14 11:19:01.329: INFO: Unexpected error: Failed to replace CronJob replace-27979878 in namespace cronjob-344: <*errors.errorString | 0xc0012c5230>: { s: "more than one job is running [{TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27979878 GenerateName: Namespace:cronjob-344 SelfLink: UID:1087b015-81a6-4b55-871b-c8814912c3e0 ResourceVersion:2188 Generation:1 CreationTimestamp:2023-03-14 11:18:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:1087b015-81a6-4b55-871b-c8814912c3e0 job-name:replace-27979878] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:21793a1c-3353-434f-bd67-6d0cec618bca Controller:0xc000d2cd40 BlockOwnerDeletion:0xc000d2cd41}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:18:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{\"f:metadata\":{\"f:ownerReferences\":{\".\":{},\"k:{\\\"uid\\\":\\\"21793a1c-3353-434f-bd67-6d0cec618bca\\\"}\":{}}},\"f:spec\":{\"f:backoffLimit\":{},\"f:completionMode\":{},\"f:completions\":{},\"f:parallelism\":{},\"f:suspend\":{},\"f:template\":{\"f:spec\":{\"f:containers\":{\"k:{\\\"name\\\":\\\"c\\\"}\":{\".\":{},\"f:command\":{},\"f:image\":{},\"f:imagePullPolicy\":{},\"f:name\":{},\"f:resources\":{},\"f:terminationMessagePath\":{},\"f:terminationMessagePolicy\":{},\"f:volumeMounts\":{\".\":{},\"k:{\\\"mountPath\\\":\\\"/data\\\"}\":{\".\":{},\"f:mountPath\":{},\"f:name\":{}}}}},\"f:dnsPolicy\":{},\"f:restartPolicy\":{},\"f:schedulerName\":{},\"f:securityContext\":{},\"f:terminationGracePeriodSeconds\":{},\"f:volumes\":{\".\":{},\"k:{\\\"name\\\":\\\"data\\\"}\":{\".\":{},\"f:emptyDir\":{},\"f:name\":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:18:07 +0000 UTC FieldsType:FieldsV1 
FieldsV1:{\"f:status\":{\"f:conditions\":{},\"f:failed\":{},\"f:ready\":{},\"f:startTime\":{},\"f:uncountedTerminatedPods\":{}}} Subresource:status}]} Spec:{Parallelism:0xc000d2cd78 Completions:0xc000d2cd7c ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc000d2ce08 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: 1087b015-81a6-4b55-871b-c8814912c3e0,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:1087b015-81a6-4b55-871b-c8814912c3e0 job-name:replace-27979878] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil TerminationMessagePath:/dev/termination-log TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc000d2ce00 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0012c4650 Suspend:0xc000d2ce2a} Status:{Conditions:[{Type:Failed Status:True LastProbeTime:2023-03-14 11:18:07 +0000 UTC LastTransitionTime:2023-03-14 11:18:07 +0000 UTC Reason:BackoffLimitExceeded Message:Job has reached the specified backoff limit}] StartTime:2023-03-14 11:18:00 +0000 UTC CompletionTime:<nil> Active:0 Succeeded:0 Failed:1 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc000d2ce3c}} {TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27979879 GenerateName: Namespace:cronjob-344 SelfLink: UID:d9f38524-cfae-4a7b-9b9a-a6956baa3e6e ResourceVersion:2268 Generation:1 CreationTimestamp:2023-03-14 11:19:00 +0000 
UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:d9f38524-cfae-4a7b-9b9a-a6956baa3e6e job-name:replace-27979879] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:21793a1c-3353-434f-bd67-6d0cec618bca Controller:0xc000d2cea0 BlockOwnerDeletion:0xc000d2cea1}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:19:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{\"f:metadata\":{\"f:ownerReferences\":{\".\":{},\"k:{\\\"uid\\\":\\\"21793a1c-3353-434f-bd67-6d0cec618bca\\\"}\":{}}},\"f:spec\":{\"f:backoffLimit\":{},\"f:completionMode\":{},\"f:completions\":{},\"f:parallelism\":{},\"f:suspend\":{},\"f:template\":{\"f:spec\":{\"f:containers\":{\"k:{\\\"name\\\":\\\"c\\\"}\":{\".\":{},\"f:command\":{},\"f:image\":{},\"f:imagePullPolicy\":{},\"f:name\":{},\"f:resources\":{},\"f:terminationMessagePath\":{},\"f:terminationMessagePolicy\":{},\"f:volumeMounts\":{\".\":{},\"k:{\\\"mountPath\\\":\\\"/data\\\"}\":{\".\":{},\"f:mountPath\":{},\"f:name\":{}}}}},\"f:dnsPolicy\":{},\"f:restartPolicy\":{},\"f:schedulerName\":{},\"f:securityContext\":{},\"f:terminationGracePeriodSeconds\":{},\"f:volumes\":{\".\":{},\"k:{\\\"name\\\":\\\"data\\\"}\":{\".\":{},\"f:emptyDir\":{},\"f:name\":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:19:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{\"f:status\":{\"f:active\":{},\"f:ready\":{},\"f:startTime\":{},\"f:uncountedTerminatedPods\":{}}} Subresource:status}]} Spec:{Parallelism:0xc000d2ced8 Completions:0xc000d2cedc ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc000d2cf68 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: d9f38524-cfae-4a7b-9b9a-a6956baa3e6e,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:d9f38524-cfae-4a7b-9b9a-a6956baa3e6e job-name:replace-27979879] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil ... Gomega truncated this representation as it exceeds 'format.MaxLength'. Consider having the object provide a custom 'GomegaStringer' representation or adjust the parameters in Gomega's 'format' package. 
Learn more here: https://onsi.github.io/gomega/#adjusting-output [FAILED] Failed to replace CronJob replace-27979878 in namespace cronjob-344: more than one job is running [{TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27979878 GenerateName: Namespace:cronjob-344 SelfLink: UID:1087b015-81a6-4b55-871b-c8814912c3e0 ResourceVersion:2188 Generation:1 CreationTimestamp:2023-03-14 11:18:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:1087b015-81a6-4b55-871b-c8814912c3e0 job-name:replace-27979878] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:21793a1c-3353-434f-bd67-6d0cec618bca Controller:0xc000d2cd40 BlockOwnerDeletion:0xc000d2cd41}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:18:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"21793a1c-3353-434f-bd67-6d0cec618bca\"}":{}}},"f:spec":{"f:backoffLimit":{},"f:completionMode":{},"f:completions":{},"f:parallelism":{},"f:suspend":{},"f:template":{"f:spec":{"f:containers":{"k:{\"name\":\"c\"}":{".":{},"f:command":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{},"f:volumeMounts":{".":{},"k:{\"mountPath\":\"/data\"}":{".":{},"f:mountPath":{},"f:name":{}}}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:volumes":{".":{},"k:{\"name\":\"data\"}":{".":{},"f:emptyDir":{},"f:name":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:18:07 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:status":{"f:conditions":{},"f:failed":{},"f:ready":{},"f:startTime":{},"f:uncountedTerminatedPods":{}}} Subresource:status}]} Spec:{Parallelism:0xc000d2cd78 Completions:0xc000d2cd7c ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc000d2ce08 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: 1087b015-81a6-4b55-871b-c8814912c3e0,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:1087b015-81a6-4b55-871b-c8814912c3e0 job-name:replace-27979878] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil TerminationMessagePath:/dev/termination-log 
TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc000d2ce00 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0012c4650 Suspend:0xc000d2ce2a} Status:{Conditions:[{Type:Failed Status:True LastProbeTime:2023-03-14 11:18:07 +0000 UTC LastTransitionTime:2023-03-14 11:18:07 +0000 UTC Reason:BackoffLimitExceeded Message:Job has reached the specified backoff limit}] StartTime:2023-03-14 11:18:00 +0000 UTC CompletionTime:<nil> Active:0 Succeeded:0 Failed:1 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc000d2ce3c}} {TypeMeta:{Kind: APIVersion:} ObjectMeta:{Name:replace-27979879 GenerateName: Namespace:cronjob-344 SelfLink: UID:d9f38524-cfae-4a7b-9b9a-a6956baa3e6e ResourceVersion:2268 Generation:1 CreationTimestamp:2023-03-14 11:19:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:d9f38524-cfae-4a7b-9b9a-a6956baa3e6e job-name:replace-27979879] Annotations:map[batch.kubernetes.io/job-tracking:] OwnerReferences:[{APIVersion:batch/v1 Kind:CronJob Name:replace UID:21793a1c-3353-434f-bd67-6d0cec618bca Controller:0xc000d2cea0 BlockOwnerDeletion:0xc000d2cea1}] Finalizers:[] ManagedFields:[{Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:19:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"21793a1c-3353-434f-bd67-6d0cec618bca\"}":{}}},"f:spec":{"f:backoffLimit":{},"f:completionMode":{},"f:completions":{},"f:parallelism":{},"f:suspend":{},"f:template":{"f:spec":{"f:containers":{"k:{\"name\":\"c\"}":{".":{},"f:command":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{},"f:volumeMounts":{".":{},"k:{\"mountPath\":\"/data\"}":{".":{},"f:mountPath":{},"f:name":{}}}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:volumes":{".":{},"k:{\"name\":\"data\"}":{".":{},"f:emptyDir":{},"f:name":{}}}}}}} Subresource:} {Manager:kube-controller-manager Operation:Update APIVersion:batch/v1 Time:2023-03-14 11:19:00 +0000 UTC FieldsType:FieldsV1 FieldsV1:{"f:status":{"f:active":{},"f:ready":{},"f:startTime":{},"f:uncountedTerminatedPods":{}}} Subresource:status}]} Spec:{Parallelism:0xc000d2ced8 Completions:0xc000d2cedc ActiveDeadlineSeconds:<nil> PodFailurePolicy:nil BackoffLimit:0xc000d2cf68 Selector:&LabelSelector{MatchLabels:map[string]string{controller-uid: 
d9f38524-cfae-4a7b-9b9a-a6956baa3e6e,},MatchExpressions:[]LabelSelectorRequirement{},} ManualSelector:<nil> Template:{ObjectMeta:{Name: GenerateName: Namespace: SelfLink: UID: ResourceVersion: Generation:0 CreationTimestamp:0001-01-01 00:00:00 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[controller-uid:d9f38524-cfae-4a7b-9b9a-a6956baa3e6e job-name:replace-27979879] Annotations:map[] OwnerReferences:[] Finalizers:[] ManagedFields:[]} Spec:{Volumes:[{Name:data VolumeSource:{HostPath:nil EmptyDir:&EmptyDirVolumeSource{Medium:,SizeLimit:<nil>,} GCEPersistentDisk:nil AWSElasticBlockStore:nil GitRepo:nil Secret:nil NFS:nil ISCSI:nil Glusterfs:nil PersistentVolumeClaim:nil RBD:nil FlexVolume:nil Cinder:nil CephFS:nil Flocker:nil DownwardAPI:nil FC:nil AzureFile:nil ConfigMap:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil Projected:nil PortworxVolume:nil ScaleIO:nil StorageOS:nil CSI:nil Ephemeral:nil}}] InitContainers:[] Containers:[{Name:c Image:registry.k8s.io/e2e-test-images/busybox:1.29-4 Command:[sleep 300] Args:[] WorkingDir: Ports:[] EnvFrom:[] Env:[] Resources:{Limits:map[] Requests:map[] Claims:[]} ResizePolicy:[] VolumeMounts:[{Name:data ReadOnly:false MountPath:/data SubPath: MountPropagation:<nil> SubPathExpr:}] VolumeDevices:[] LivenessProbe:nil ReadinessProbe:nil StartupProbe:nil Lifecycle:nil TerminationMessagePath:/dev/termination-log TerminationMessagePolicy:File ImagePullPolicy:IfNotPresent SecurityContext:nil Stdin:false StdinOnce:false TTY:false}] EphemeralContainers:[] RestartPolicy:OnFailure TerminationGracePeriodSeconds:0xc000d2cf60 ActiveDeadlineSeconds:<nil> DNSPolicy:ClusterFirst NodeSelector:map[] ServiceAccountName: DeprecatedServiceAccount: AutomountServiceAccountToken:<nil> NodeName: HostNetwork:false HostPID:false HostIPC:false ShareProcessNamespace:<nil> SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} ImagePullSecrets:[] Hostname: Subdomain: Affinity:nil SchedulerName:default-scheduler Tolerations:[] HostAliases:[] PriorityClassName: Priority:<nil> DNSConfig:nil ReadinessGates:[] RuntimeClassName:<nil> EnableServiceLinks:<nil> PreemptionPolicy:<nil> Overhead:map[] TopologySpreadConstraints:[] SetHostnameAsFQDN:<nil> OS:nil HostUsers:<nil> SchedulingGates:[] ResourceClaims:[]}} TTLSecondsAfterFinished:<nil> CompletionMode:0xc0012c4680 Suspend:0xc000d2cf8a} Status:{Conditions:[] StartTime:2023-03-14 11:19:00 +0000 UTC CompletionTime:<nil> Active:1 Succeeded:0 Failed:0 CompletedIndexes: UncountedTerminatedPods:&UncountedTerminatedPods{Succeeded:[],Failed:[],} Ready:0xc000d2cf8c}}] In [It] at: test/e2e/apps/cronjob.go:185 @ 03/14/23 11:19:01.329 < Exit [It] should replace jobs when ReplaceConcurrent [Conformance] - test/e2e/apps/cronjob.go:161 @ 03/14/23 11:19:01.329 (1m48.023s) > Enter [AfterEach] [sig-apps] CronJob - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:19:01.329 Mar 14 11:19:01.329: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-apps] CronJob - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:19:01.333 (3ms) > Enter [DeferCleanup (Each)] [sig-apps] CronJob - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:19:01.333 < Exit [DeferCleanup (Each)] [sig-apps] CronJob - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:19:01.333 (0s) > Enter [DeferCleanup (Each)] [sig-apps] 
CronJob - dump namespaces | framework.go:209 @ 03/14/23 11:19:01.333 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:19:01.333 STEP: Collecting events from namespace "cronjob-344". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:19:01.333 STEP: Found 14 events. - test/e2e/framework/debug/dump.go:46 @ 03/14/23 11:19:01.336 Mar 14 11:19:01.336: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for replace-27979878-jjx8r: { } Scheduled: Successfully assigned cronjob-344/replace-27979878-jjx8r to 172.17.0.1 Mar 14 11:19:01.336: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for replace-27979879-5wp5w: { } Scheduled: Successfully assigned cronjob-344/replace-27979879-5wp5w to 172.17.0.1 Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:00 +0000 UTC - event for replace: {cronjob-controller } SuccessfulCreate: Created job replace-27979878 Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:00 +0000 UTC - event for replace-27979878: {job-controller } SuccessfulCreate: Created pod: replace-27979878-jjx8r Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:02 +0000 UTC - event for replace-27979878-jjx8r: {kubelet 172.17.0.1} Started: Started container c Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:02 +0000 UTC - event for replace-27979878-jjx8r: {kubelet 172.17.0.1} Created: Created container c Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:02 +0000 UTC - event for replace-27979878-jjx8r: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:03 +0000 UTC - event for replace-27979878-jjx8r: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:07 +0000 UTC - event for replace: {cronjob-controller } SawCompletedJob: Saw completed job: replace-27979878, status: Failed Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:07 +0000 UTC - event for replace-27979878: {job-controller } SuccessfulDelete: Deleted pod: replace-27979878-jjx8r Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:07 +0000 UTC - event for replace-27979878: {job-controller } BackoffLimitExceeded: Job has reached the specified backoff limit Mar 14 11:19:01.336: INFO: At 2023-03-14 11:18:09 +0000 UTC - event for replace-27979878-jjx8r: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container c in pod replace-27979878-jjx8r_cronjob-344(b454b1cf-f8fc-4f8a-b514-44275cd38d9e) Mar 14 11:19:01.336: INFO: At 2023-03-14 11:19:00 +0000 UTC - event for replace: {cronjob-controller } SuccessfulCreate: Created job replace-27979879 Mar 14 11:19:01.336: INFO: At 2023-03-14 11:19:00 +0000 UTC - event for replace-27979879: {job-controller } SuccessfulCreate: Created pod: replace-27979879-5wp5w Mar 14 11:19:01.339: INFO: POD NODE PHASE GRACE CONDITIONS Mar 14 11:19:01.339: INFO: replace-27979879-5wp5w 172.17.0.1 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:00 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:00 +0000 UTC ContainersNotReady containers with unready status: [c]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:00 +0000 UTC ContainersNotReady containers with unready status: [c]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:00 +0000 UTC }] Mar 14 11:19:01.339: INFO: Mar 14 11:19:01.352: INFO: Unable to fetch cronjob-344/replace-27979879-5wp5w/c logs: the server rejected our request for an unknown reason (get pods 
replace-27979879-5wp5w) Mar 14 11:19:01.355: INFO: Logging node info for node 172.17.0.1 Mar 14 11:19:01.358: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 2143 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:17:53 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 
(bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 14 11:19:01.358: INFO: Logging kubelet events for node 172.17.0.1 Mar 14 11:19:01.361: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 14 11:19:01.366: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded) Mar 14 11:19:01.366: INFO: Container coredns ready: false, restart count 7 Mar 14 11:19:01.366: INFO: replace-27979879-5wp5w started at 2023-03-14 11:19:00 +0000 UTC (0+1 container statuses recorded) Mar 14 11:19:01.366: INFO: Container c ready: false, restart count 0 Mar 14 11:19:01.393: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:19:01.393 (60ms) < Exit [DeferCleanup (Each)] [sig-apps] CronJob - dump namespaces | framework.go:209 @ 03/14/23 11:19:01.393 (60ms) > Enter [DeferCleanup (Each)] [sig-apps] CronJob - tear down framework | framework.go:206 @ 03/14/23 11:19:01.393 STEP: Destroying namespace "cronjob-344" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:19:01.393 < Exit [DeferCleanup (Each)] [sig-apps] CronJob - tear down framework | framework.go:206 @ 03/14/23 11:19:01.399 (6ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:19:01.399 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:19:01.399 (0s)
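One practical note from the log above: the "Gomega truncated this representation" message points at a real knob, the MaxLength variable in Gomega's format package. A minimal sketch of adjusting it in a suite's setup, per the linked docs (the default is 4000 characters; 0 disables truncation):

package main

import "github.com/onsi/gomega/format"

func init() {
	// Print objects in full instead of truncating at 4000 characters, at
	// the cost of very large failure messages like the Job dumps above.
	format.MaxLength = 0
}

func main() {}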
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sJob\sshould\sadopt\smatching\sorphans\sand\srelease\snon\-matching\spods\s\[Conformance\]$'
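This test creates a Job named adopt-release, orphans one of its pods, and expects the Job controller to re-adopt pods whose labels match its selector and to release ones that do not. The expectation below failed against a list of 8 pods, apparently because containers kept failing on the node (exit code 137 terminations and a "runc create failed" StartError appear in the dump), so the controller kept creating replacements. A minimal sketch of the selection the assertion runs against, with assumed names (the job=adopt-release label comes from the pod metadata below):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listJobPods (assumed helper) selects the pods belonging to the
// adopt-release job by label, the same population the failed expectation
// was evaluated over.
func listJobPods(ctx context.Context, c kubernetes.Interface, ns string) error {
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
		LabelSelector: "job=adopt-release",
	})
	if err != nil {
		return err
	}
	for _, p := range pods.Items {
		// An adopted pod carries a controller ownerReference back to the
		// Job; a released pod does not.
		fmt.Println(p.Name, p.Status.Phase, len(p.OwnerReferences))
	}
	return nil
}

func main() {}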
[FAILED] Expected <[]v1.Pod | len:8, cap:8>: - metadata: creationTimestamp: "2023-03-14T11:52:36Z" generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:52:36Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:52:41Z" name: adopt-release-22d4b namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8078" uid: e10c7471-71f8-4420-989c-08c87ae1dce1 spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-sxk6l readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-sxk6l projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" status: "True" type: PodScheduled containerStatuses: - containerID: 
containerd://fcb13c406f7a36599bdf0553460c5a5723a6042312e72da061675a136e3f1ab6 image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://fcb13c406f7a36599bdf0553460c5a5723a6042312e72da061675a136e3f1ab6 exitCode: 137 finishedAt: "2023-03-14T11:52:39Z" reason: Error startedAt: "2023-03-14T11:52:38Z" hostIP: 172.17.0.1 phase: Failed qosClass: BestEffort startTime: "2023-03-14T11:52:36Z" - metadata: creationTimestamp: "2023-03-14T11:53:15Z" finalizers: - batch.kubernetes.io/job-tracking generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:finalizers: .: {} v:"batch.kubernetes.io/job-tracking": {} f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:53:15Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:podIP: {} f:podIPs: .: {} k:{"ip":"10.88.6.97"}: .: {} f:ip: {} k:{"ip":"2001:4860:4860::661"}: .: {} f:ip: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:53:19Z" name: adopt-release-2cxr5 namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8128" uid: 4ef4cf1f-bd47-4a30-911a-ef601c0cdde5 spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-bzzrw readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data 
- name: kube-api-access-bzzrw projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:15Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:19Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:19Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:15Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://386b65a7febe3f5d326cba411395f3fdf94798df440851ac1d97df74fb940871 image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://386b65a7febe3f5d326cba411395f3fdf94798df440851ac1d97df74fb940871 exitCode: 137 finishedAt: "2023-03-14T11:53:19Z" reason: Error startedAt: "2023-03-14T11:53:17Z" hostIP: 172.17.0.1 phase: Running podIP: 10.88.6.97 podIPs: - ip: 10.88.6.97 - ip: 2001:4860:4860::661 qosClass: BestEffort startTime: "2023-03-14T11:53:15Z" - metadata: creationTimestamp: "2023-03-14T11:52:29Z" generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:52:29Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:podIP: {} f:podIPs: .: {} k:{"ip":"10.88.6.82"}: .: {} f:ip: {} k:{"ip":"2001:4860:4860::652"}: .: {} f:ip: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:52:35Z" name: adopt-release-4lbkz namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8034" uid: beea789f-bf34-45ed-8b9c-688baed3a3ea spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} 
terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-k7j76 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-k7j76 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:29Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:33Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:33Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:29Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://408d71a7b2dfa3fe4a65093e3761d06514cf72c86f707ada90dbeeb695cbd95e image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://408d71a7b2dfa3fe4a65093e3761d06514cf72c86f707ada90dbeeb695cbd95e exitCode: 137 finishedAt: "2023-03-14T11:52:33Z" reason: Error startedAt: "2023-03-14T11:52:32Z" hostIP: 172.17.0.1 phase: Failed podIP: 10.88.6.82 podIPs: - ip: 10.88.6.82 - ip: 2001:4860:4860::652 qosClass: BestEffort startTime: "2023-03-14T11:52:29Z"
to have length 2
In [It] at: test/e2e/apps/job.go:530 @ 03/14/23 11:53:19.561
from junit_01.xml
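The failure above is a plain Gomega HaveLen mismatch: the check at test/e2e/apps/job.go:530 lists the Job's pods and expects exactly parallelism (2) of them, but 8 come back, because every container is killed almost immediately (exitCode: 137, i.e. SIGKILL; one pod, adopt-release-dxff8, never starts at all and reports a runc StartError) and, with restartPolicy: Never, the Job controller keeps creating replacements. A minimal client-go sketch of the equivalent listing and count, assuming only what the log itself shows (the kubeconfig path, namespace job-3772, and label job-name=adopt-release); the actual e2e helper may filter or sort differently:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Same kubeconfig the e2e framework reports below (>>> kubeConfig: ...).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Every pod in the dump carries job-name=adopt-release in namespace job-3772.
	pods, err := cs.CoreV1().Pods("job-3772").List(context.TODO(),
		metav1.ListOptions{LabelSelector: "job-name=adopt-release"})
	if err != nil {
		panic(err)
	}

	// The e2e assertion is effectively Expect(pods.Items).To(HaveLen(2));
	// here all 8 pods come back, the six Failed ones alongside the two
	// Running replacements, so the length check can never pass.
	fmt.Printf("got %d pods, want 2\n", len(pods.Items))
	for _, p := range pods.Items {
		fmt.Printf("  %s\t%s\n", p.Name, p.Status.Phase)
	}
}

Run against the cluster at the moment of the dump, a check like this would print the two Running replacements (adopt-release-2cxr5, adopt-release-f7fx6) next to six Failed pods — exactly the 8-element slice in the failure message.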
> Enter [BeforeEach] [sig-apps] Job - set up framework | framework.go:191 @ 03/14/23 11:52:29.513
STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:52:29.514
Mar 14 11:52:29.514: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename job - test/e2e/framework/framework.go:250 @ 03/14/23 11:52:29.514
STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:52:29.525
STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:52:29.529
< Exit [BeforeEach] [sig-apps] Job - set up framework | framework.go:191 @ 03/14/23 11:52:29.533 (20ms)
> Enter [BeforeEach] [sig-apps] Job - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:52:29.533
< Exit [BeforeEach] [sig-apps] Job - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:52:29.533 (0s)
> Enter [It] should adopt matching orphans and release non-matching pods [Conformance] - test/e2e/apps/job.go:513 @ 03/14/23 11:52:29.533
STEP: Creating a job - test/e2e/apps/job.go:514 @ 03/14/23 11:52:29.533
STEP: Ensuring active pods == parallelism - test/e2e/apps/job.go:523 @ 03/14/23 11:52:29.539
STEP: Orphaning one of the Job's Pods - test/e2e/apps/job.go:527 @ 03/14/23 11:53:19.544
[FAILED] Expected <[]v1.Pod | len:8, cap:8>:
- metadata: creationTimestamp: "2023-03-14T11:52:36Z" generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:52:36Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:52:41Z" name: adopt-release-22d4b namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8078" uid: e10c7471-71f8-4420-989c-08c87ae1dce1 spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-sxk6l readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-sxk6l projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://fcb13c406f7a36599bdf0553460c5a5723a6042312e72da061675a136e3f1ab6 image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://fcb13c406f7a36599bdf0553460c5a5723a6042312e72da061675a136e3f1ab6 exitCode: 137 finishedAt: "2023-03-14T11:52:39Z" reason: Error startedAt: "2023-03-14T11:52:38Z" hostIP: 172.17.0.1 phase: Failed qosClass: BestEffort startTime: "2023-03-14T11:52:36Z" - metadata: creationTimestamp: "2023-03-14T11:53:15Z" finalizers: - batch.kubernetes.io/job-tracking generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:finalizers: .: {} v:"batch.kubernetes.io/job-tracking": {} f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:53:15Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:podIP: {} f:podIPs: .: {} k:{"ip":"10.88.6.97"}: .: {} 
f:ip: {} k:{"ip":"2001:4860:4860::661"}: .: {} f:ip: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:53:19Z" name: adopt-release-2cxr5 namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8128" uid: 4ef4cf1f-bd47-4a30-911a-ef601c0cdde5 spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-bzzrw readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-bzzrw projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:15Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:19Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:19Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:15Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://386b65a7febe3f5d326cba411395f3fdf94798df440851ac1d97df74fb940871 image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://386b65a7febe3f5d326cba411395f3fdf94798df440851ac1d97df74fb940871 exitCode: 137 finishedAt: "2023-03-14T11:53:19Z" reason: Error startedAt: "2023-03-14T11:53:17Z" hostIP: 172.17.0.1 phase: Running podIP: 10.88.6.97 podIPs: - ip: 10.88.6.97 - ip: 2001:4860:4860::661 qosClass: BestEffort startTime: "2023-03-14T11:53:15Z" - metadata: creationTimestamp: "2023-03-14T11:52:29Z" generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} 
f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:52:29Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:podIP: {} f:podIPs: .: {} k:{"ip":"10.88.6.82"}: .: {} f:ip: {} k:{"ip":"2001:4860:4860::652"}: .: {} f:ip: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:52:35Z" name: adopt-release-4lbkz namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8034" uid: beea789f-bf34-45ed-8b9c-688baed3a3ea spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-k7j76 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-k7j76 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:29Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:33Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:33Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:29Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://408d71a7b2dfa3fe4a65093e3761d06514cf72c86f707ada90dbeeb695cbd95e image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://408d71a7b2dfa3fe4a65093e3761d06514cf72c86f707ada90dbeeb695cbd95e exitCode: 137 finishedAt: "2023-03-14T11:52:33Z" reason: Error startedAt: "2023-03-14T11:52:32Z" hostIP: 172.17.0.1 phase: Failed podIP: 10.88.6.82 podIPs: - ip: 10.88.6.82 - ip: 2001:4860:4860::652 qosClass: BestEffort startTime: "2023-03-14T11:52:29Z" - 
metadata: creationTimestamp: "2023-03-14T11:52:51Z" generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:52:51Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:podIP: {} f:podIPs: .: {} k:{"ip":"10.88.6.91"}: .: {} f:ip: {} k:{"ip":"2001:4860:4860::65b"}: .: {} f:ip: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:52:57Z" name: adopt-release-68cjc namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8111" uid: 18682675-db5c-45e6-a2b2-b0936e5b3815 spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-ffpt4 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-ffpt4 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:51Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:55Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:55Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:51Z" status: "True" 
type: PodScheduled containerStatuses: - containerID: containerd://b5b853e0d2267dfc82763af8c0e817ee338ddd90a1d3a3610d4a3ddefa162d21 image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://b5b853e0d2267dfc82763af8c0e817ee338ddd90a1d3a3610d4a3ddefa162d21 exitCode: 137 finishedAt: "2023-03-14T11:52:55Z" reason: Error startedAt: "2023-03-14T11:52:53Z" hostIP: 172.17.0.1 phase: Failed podIP: 10.88.6.91 podIPs: - ip: 10.88.6.91 - ip: 2001:4860:4860::65b qosClass: BestEffort startTime: "2023-03-14T11:52:51Z" - metadata: creationTimestamp: "2023-03-14T11:52:51Z" generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:52:51Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:52:55Z" name: adopt-release-dxff8 namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8110" uid: b2a7bfc9-7108-4241-af38-994e4f48219e spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-kw4zf readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-kw4zf projected: defaultMode: 420 sources: - serviceAccountToken: 
expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:51Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:51Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:51Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:51Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://d81f9b863f65509e13309f698fe1b2ea0a5c2fc0f7d42a1fdbf060e5adcd59c2 image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://d81f9b863f65509e13309f698fe1b2ea0a5c2fc0f7d42a1fdbf060e5adcd59c2 exitCode: 128 finishedAt: "2023-03-14T11:52:53Z" message: 'failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: can''t get final child''s PID from pipe: EOF: unknown' reason: StartError startedAt: "1970-01-01T00:00:00Z" hostIP: 172.17.0.1 phase: Failed qosClass: BestEffort startTime: "2023-03-14T11:52:51Z" - metadata: creationTimestamp: "2023-03-14T11:53:15Z" finalizers: - batch.kubernetes.io/job-tracking generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:finalizers: .: {} v:"batch.kubernetes.io/job-tracking": {} f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:53:15Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:podIP: {} f:podIPs: .: {} k:{"ip":"10.88.6.96"}: .: {} f:ip: {} k:{"ip":"2001:4860:4860::660"}: .: {} f:ip: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:53:19Z" name: adopt-release-f7fx6 namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8131" uid: aade5569-6caf-4935-9095-bc71b8694a14 spec: containers: 
- command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-x9b5l readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-x9b5l projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:15Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:19Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:19Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:53:15Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://2fbe889e73397beaf651465f5eef8e56df927b1d28672425f4adf1818e556c37 image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://2fbe889e73397beaf651465f5eef8e56df927b1d28672425f4adf1818e556c37 exitCode: 137 finishedAt: "2023-03-14T11:53:19Z" reason: Error startedAt: "2023-03-14T11:53:17Z" hostIP: 172.17.0.1 phase: Running podIP: 10.88.6.96 podIPs: - ip: 10.88.6.96 - ip: 2001:4860:4860::660 qosClass: BestEffort startTime: "2023-03-14T11:53:15Z" - metadata: creationTimestamp: "2023-03-14T11:52:36Z" generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:52:36Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} 
k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:52:41Z" name: adopt-release-fcvmd namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8076" uid: e8267901-ab24-4c89-af9d-7e6ae7efc9f9 spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-9mm5n readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-9mm5n projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:36Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://220918e60f88f24ef51e0b0cb671443f0854c698a7cfcca4332b871fb9d4e3a9 image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://220918e60f88f24ef51e0b0cb671443f0854c698a7cfcca4332b871fb9d4e3a9 exitCode: 137 finishedAt: "2023-03-14T11:52:39Z" reason: Error startedAt: "2023-03-14T11:52:38Z" hostIP: 172.17.0.1 phase: Failed qosClass: BestEffort startTime: "2023-03-14T11:52:36Z" - metadata: creationTimestamp: "2023-03-14T11:52:29Z" generateName: adopt-release- labels: controller-uid: 1b87e8bc-9a78-455c-86c1-601346c34387 job: adopt-release job-name: adopt-release managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:generateName: {} f:labels: .: {} f:controller-uid: {} f:job: {} f:job-name: {} f:ownerReferences: .: {} k:{"uid":"1b87e8bc-9a78-455c-86c1-601346c34387"}: {} f:spec: f:containers: k:{"name":"c"}: .: {} f:command: {} f:image: {} f:imagePullPolicy: {} f:name: {} f:resources: {} f:securityContext: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} 
f:volumeMounts: .: {} k:{"mountPath":"/data"}: .: {} f:mountPath: {} f:name: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"data"}: .: {} f:emptyDir: {} f:name: {} manager: kube-controller-manager operation: Update time: "2023-03-14T11:52:29Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:podIP: {} f:podIPs: .: {} k:{"ip":"10.88.6.81"}: .: {} f:ip: {} k:{"ip":"2001:4860:4860::651"}: .: {} f:ip: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:52:35Z" name: adopt-release-knsbv namespace: job-3772 ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: adopt-release uid: 1b87e8bc-9a78-455c-86c1-601346c34387 resourceVersion: "8035" uid: c9730ec2-a6b2-40a9-81af-ff6e39d8148f spec: containers: - command: - sleep - "1000000" image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imagePullPolicy: IfNotPresent name: c resources: {} securityContext: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /data name: data - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-dd44q readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: data - name: kube-api-access-dd44q projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:29Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:29Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:29Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2023-03-14T11:52:29Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://9352ce2bbd744c56a7503bbe6b6a45cd30839c41714c4ccdf6d2a90301870c04 image: registry.k8s.io/e2e-test-images/busybox:1.29-4 imageID: registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 lastState: {} name: c ready: false restartCount: 0 started: false state: terminated: containerID: containerd://9352ce2bbd744c56a7503bbe6b6a45cd30839c41714c4ccdf6d2a90301870c04 exitCode: 137 finishedAt: "2023-03-14T11:52:33Z" reason: Error startedAt: "2023-03-14T11:52:32Z" hostIP: 172.17.0.1 phase: Failed podIP: 
10.88.6.81 podIPs: - ip: 10.88.6.81 - ip: 2001:4860:4860::651 qosClass: BestEffort startTime: "2023-03-14T11:52:29Z"
to have length 2
In [It] at: test/e2e/apps/job.go:530 @ 03/14/23 11:53:19.561
< Exit [It] should adopt matching orphans and release non-matching pods [Conformance] - test/e2e/apps/job.go:513 @ 03/14/23 11:53:19.561 (50.028s)
> Enter [AfterEach] [sig-apps] Job - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:53:19.561
Mar 14 11:53:19.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
< Exit [AfterEach] [sig-apps] Job - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:53:19.564 (3ms)
> Enter [DeferCleanup (Each)] [sig-apps] Job - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:53:19.564
< Exit [DeferCleanup (Each)] [sig-apps] Job - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:53:19.564 (0s)
> Enter [DeferCleanup (Each)] [sig-apps] Job - dump namespaces | framework.go:209 @ 03/14/23 11:53:19.564
STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:53:19.564
STEP: Collecting events from namespace "job-3772". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:53:19.564
STEP: Found 40 events. - test/e2e/framework/debug/dump.go:46 @ 03/14/23 11:53:19.568
Mar 14 11:53:19.568: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for adopt-release-22d4b: { } Scheduled: Successfully assigned job-3772/adopt-release-22d4b to 172.17.0.1
Mar 14 11:53:19.568: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for adopt-release-2cxr5: { } Scheduled: Successfully assigned job-3772/adopt-release-2cxr5 to 172.17.0.1
Mar 14 11:53:19.568: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for adopt-release-4lbkz: { } Scheduled: Successfully assigned job-3772/adopt-release-4lbkz to 172.17.0.1
Mar 14 11:53:19.568: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for adopt-release-68cjc: { } Scheduled: Successfully assigned job-3772/adopt-release-68cjc to 172.17.0.1
Mar 14 11:53:19.568: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for adopt-release-dxff8: { } Scheduled: Successfully assigned job-3772/adopt-release-dxff8 to 172.17.0.1
Mar 14 11:53:19.568: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for adopt-release-f7fx6: { } Scheduled: Successfully assigned job-3772/adopt-release-f7fx6 to 172.17.0.1
Mar 14 11:53:19.568: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for adopt-release-fcvmd: { } Scheduled: Successfully assigned job-3772/adopt-release-fcvmd to 172.17.0.1
Mar 14 11:53:19.568: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for adopt-release-knsbv: { } Scheduled: Successfully assigned job-3772/adopt-release-knsbv to 172.17.0.1
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:29 +0000 UTC - event for adopt-release: {job-controller } SuccessfulCreate: Created pod: adopt-release-4lbkz
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:29 +0000 UTC - event for adopt-release: {job-controller } SuccessfulCreate: Created pod: adopt-release-knsbv
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:32 +0000 UTC - event for adopt-release-4lbkz: {kubelet 172.17.0.1} Created: Created container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:32 +0000 UTC - event for adopt-release-4lbkz: {kubelet 172.17.0.1} Started: Started container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:32 +0000 UTC - event for adopt-release-4lbkz: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:32 +0000 UTC - event for adopt-release-knsbv: {kubelet 172.17.0.1} Started: Started container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:32 +0000 UTC - event for adopt-release-knsbv: {kubelet 172.17.0.1} Created: Created container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:32 +0000 UTC - event for adopt-release-knsbv: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:36 +0000 UTC - event for adopt-release: {job-controller } SuccessfulCreate: Created pod: adopt-release-fcvmd
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:36 +0000 UTC - event for adopt-release: {job-controller } SuccessfulCreate: Created pod: adopt-release-22d4b
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:38 +0000 UTC - event for adopt-release-22d4b: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:38 +0000 UTC - event for adopt-release-22d4b: {kubelet 172.17.0.1} Created: Created container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:38 +0000 UTC - event for adopt-release-22d4b: {kubelet 172.17.0.1} Started: Started container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:38 +0000 UTC - event for adopt-release-fcvmd: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:38 +0000 UTC - event for adopt-release-fcvmd: {kubelet 172.17.0.1} Created: Created container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:38 +0000 UTC - event for adopt-release-fcvmd: {kubelet 172.17.0.1} Started: Started container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:51 +0000 UTC - event for adopt-release: {job-controller } SuccessfulCreate: Created pod: adopt-release-dxff8
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:51 +0000 UTC - event for adopt-release: {job-controller } SuccessfulCreate: Created pod: adopt-release-68cjc
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:53 +0000 UTC - event for adopt-release-68cjc: {kubelet 172.17.0.1} Created: Created container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:53 +0000 UTC - event for adopt-release-68cjc: {kubelet 172.17.0.1} Started: Started container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:53 +0000 UTC - event for adopt-release-68cjc: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:53 +0000 UTC - event for adopt-release-dxff8: {kubelet 172.17.0.1} Created: Created container c
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:53 +0000 UTC - event for adopt-release-dxff8: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:52:53 +0000 UTC - event for adopt-release-dxff8: {kubelet 172.17.0.1} Failed: Error: failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: can't get final child's PID from pipe: EOF: unknown
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:53:15 +0000 UTC - event for adopt-release: {job-controller } SuccessfulCreate: Created pod: adopt-release-2cxr5
Mar 14 11:53:19.568: INFO: At 2023-03-14 11:53:15 +0000 UTC - event for adopt-release: {job-controller } SuccessfulCreate: Created pod: adopt-release-f7fx6
11:53:19.568: INFO: At 2023-03-14 11:53:17 +0000 UTC - event for adopt-release-2cxr5: {kubelet 172.17.0.1} Started: Started container c Mar 14 11:53:19.568: INFO: At 2023-03-14 11:53:17 +0000 UTC - event for adopt-release-2cxr5: {kubelet 172.17.0.1} Created: Created container c Mar 14 11:53:19.568: INFO: At 2023-03-14 11:53:17 +0000 UTC - event for adopt-release-2cxr5: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine Mar 14 11:53:19.568: INFO: At 2023-03-14 11:53:17 +0000 UTC - event for adopt-release-f7fx6: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine Mar 14 11:53:19.568: INFO: At 2023-03-14 11:53:17 +0000 UTC - event for adopt-release-f7fx6: {kubelet 172.17.0.1} Created: Created container c Mar 14 11:53:19.568: INFO: At 2023-03-14 11:53:17 +0000 UTC - event for adopt-release-f7fx6: {kubelet 172.17.0.1} Started: Started container c Mar 14 11:53:19.572: INFO: POD NODE PHASE GRACE CONDITIONS Mar 14 11:53:19.572: INFO: adopt-release-22d4b 172.17.0.1 Failed [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:36 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:36 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:36 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:36 +0000 UTC }] Mar 14 11:53:19.572: INFO: adopt-release-2cxr5 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:15 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:19 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:19 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:15 +0000 UTC }] Mar 14 11:53:19.572: INFO: adopt-release-4lbkz 172.17.0.1 Failed [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:29 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:33 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:33 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:29 +0000 UTC }] Mar 14 11:53:19.572: INFO: adopt-release-68cjc 172.17.0.1 Failed [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:55 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:55 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:51 +0000 UTC }] Mar 14 11:53:19.572: INFO: adopt-release-dxff8 172.17.0.1 Failed [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:51 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:51 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:51 +0000 UTC }] Mar 14 11:53:19.572: INFO: adopt-release-f7fx6 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:15 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:19 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:19 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:15 +0000 UTC }] Mar 14 11:53:19.572: INFO: adopt-release-fcvmd 
172.17.0.1 Failed [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:36 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:36 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:36 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:36 +0000 UTC }] Mar 14 11:53:19.572: INFO: adopt-release-knsbv 172.17.0.1 Failed [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:29 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:29 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:29 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:52:29 +0000 UTC }] Mar 14 11:53:19.572: INFO: Mar 14 11:53:19.601: INFO: Logging node info for node 172.17.0.1 Mar 14 11:53:19.604: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 7397 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:51:10 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:51:10 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:51:10 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:51:10 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID 
available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:51:10 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 14 11:53:19.604: INFO: Logging kubelet events for node 172.17.0.1 Mar 14 11:53:19.607: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 14 11:53:19.613: INFO: adopt-release-knsbv started at 2023-03-14 11:52:29 +0000 UTC (0+1 container statuses recorded) Mar 14 11:53:19.613: INFO: Container c ready: false, restart count 0 Mar 14 11:53:19.613: INFO: adopt-release-fcvmd started at 2023-03-14 11:52:36 +0000 UTC (0+1 container statuses recorded) Mar 14 11:53:19.613: INFO: Container c ready: false, restart count 0 Mar 14 11:53:19.613: INFO: adopt-release-68cjc started at 2023-03-14 11:52:51 +0000 UTC (0+1 container statuses recorded) Mar 14 11:53:19.613: INFO: Container c ready: false, restart count 0 Mar 14 11:53:19.613: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded) Mar 14 11:53:19.613: INFO: Container coredns ready: false, restart count 13 Mar 14 
11:53:19.613: INFO: adopt-release-2cxr5 started at 2023-03-14 11:53:15 +0000 UTC (0+1 container statuses recorded) Mar 14 11:53:19.613: INFO: Container c ready: false, restart count 0 Mar 14 11:53:19.613: INFO: adopt-release-4lbkz started at 2023-03-14 11:52:29 +0000 UTC (0+1 container statuses recorded) Mar 14 11:53:19.613: INFO: Container c ready: false, restart count 0 Mar 14 11:53:19.613: INFO: adopt-release-dxff8 started at 2023-03-14 11:52:51 +0000 UTC (0+1 container statuses recorded) Mar 14 11:53:19.613: INFO: Container c ready: false, restart count 0 Mar 14 11:53:19.613: INFO: adopt-release-f7fx6 started at 2023-03-14 11:53:15 +0000 UTC (0+1 container statuses recorded) Mar 14 11:53:19.613: INFO: Container c ready: false, restart count 0 Mar 14 11:53:19.613: INFO: adopt-release-22d4b started at 2023-03-14 11:52:36 +0000 UTC (0+1 container statuses recorded) Mar 14 11:53:19.613: INFO: Container c ready: false, restart count 0 Mar 14 11:53:19.645: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:53:19.645 (80ms) < Exit [DeferCleanup (Each)] [sig-apps] Job - dump namespaces | framework.go:209 @ 03/14/23 11:53:19.645 (80ms) > Enter [DeferCleanup (Each)] [sig-apps] Job - tear down framework | framework.go:206 @ 03/14/23 11:53:19.645 STEP: Destroying namespace "job-3772" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:53:19.645 < Exit [DeferCleanup (Each)] [sig-apps] Job - tear down framework | framework.go:206 @ 03/14/23 11:53:19.65 (6ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:53:19.65 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:53:19.65 (0s)
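The adopt/release failure above reduces to a count assertion: at job.go:530 the test expects the Job's list of running pods to have length 2, but the runc "create failed" events leave replacement pods cycling through Failed, so the count is off when the check fires. A minimal sketch of that kind of check using plain client-go rather than the e2e framework's helpers (package, function, and parameter names here are illustrative assumptions, not the test's source):

package jobcheck

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// countRunningJobPods lists the pods labelled job-name=<jobName> in ns and
// counts those whose phase is Running; the failing assertion expected a
// list of exactly two such pods.
func countRunningJobPods(ctx context.Context, c kubernetes.Interface, ns, jobName string) (int, error) {
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
		LabelSelector: "job-name=" + jobName,
	})
	if err != nil {
		return 0, fmt.Errorf("listing pods for job %q: %w", jobName, err)
	}
	running := 0
	for _, p := range pods.Items {
		if p.Status.Phase == corev1.PodRunning {
			running++
		}
	}
	return running, nil
}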
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sStatefulSet\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sBurst\sscaling\sshould\srun\sto\scompletion\seven\swith\sunhealthy\spods\s\[Slow\]\s\[Conformance\]$'
[FAILED] Failed waiting for pods to enter running: timed out waiting for the condition
In [It] at: test/e2e/framework/statefulset/wait.go:58 @ 03/14/23 11:17:02.942
from junit_01.xml
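The goroutine dumps in the trace below show where the ten-minute wait is spent: WaitForRunningAndReady drives wait.PollImmediateWithContext, listing pods every ten seconds until they are Running and Ready, or until the poll gives up with the "timed out waiting for the condition" error above. A minimal sketch of the same polling pattern against a single pod (assumed names; this is not the framework's WaitForRunning implementation):

package sswait

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodRunningReady polls every 10s (the cadence visible in the log
// timestamps) until the named pod is Running with Ready=True. On timeout
// the wait package returns the "timed out waiting for the condition"
// error reported above.
func waitForPodRunningReady(ctx context.Context, c kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, 10*time.Second, timeout,
		func(ctx context.Context) (bool, error) {
			pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, err // abort on API errors (a sketch; the framework is more tolerant)
			}
			if pod.Status.Phase != corev1.PodRunning {
				return false, nil // keep polling
			}
			for _, cond := range pod.Status.Conditions {
				if cond.Type == corev1.PodReady {
					return cond.Status == corev1.ConditionTrue, nil
				}
			}
			return false, nil
		})
}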
> Enter [BeforeEach] [sig-apps] StatefulSet - set up framework | framework.go:191 @ 03/14/23 11:07:02.901 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:07:02.901 Mar 14 11:07:02.901: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename statefulset - test/e2e/framework/framework.go:250 @ 03/14/23 11:07:02.902 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:07:02.913 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:07:02.916 < Exit [BeforeEach] [sig-apps] StatefulSet - set up framework | framework.go:191 @ 03/14/23 11:07:02.92 (19ms) > Enter [BeforeEach] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:07:02.92 < Exit [BeforeEach] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:07:02.92 (0s) > Enter [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:100 @ 03/14/23 11:07:02.92 < Exit [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:100 @ 03/14/23 11:07:02.92 (0s) > Enter [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:115 @ 03/14/23 11:07:02.92 STEP: Creating service test in namespace statefulset-9056 - test/e2e/apps/statefulset.go:120 @ 03/14/23 11:07:02.92 < Exit [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:115 @ 03/14/23 11:07:02.924 (4ms) > Enter [It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] - test/e2e/apps/statefulset.go:701 @ 03/14/23 11:07:02.924 STEP: Creating stateful set ss in namespace statefulset-9056 - test/e2e/apps/statefulset.go:704 @ 03/14/23 11:07:02.924 STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 - test/e2e/apps/statefulset.go:711 @ 03/14/23 11:07:02.93 Mar 14 11:07:02.933: INFO: Found 0 stateful pods, waiting for 1 Mar 14 11:07:12.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Pending - Ready=false Mar 14 11:07:22.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Pending - Ready=false Mar 14 11:07:32.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:07:42.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:07:52.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:08:02.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:08:12.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:08:22.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:08:32.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:08:42.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:08:52.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:09:02.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:09:12.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:09:22.938: INFO: 
Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:09:32.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:09:42.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:09:52.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:10:02.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:10:12.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:10:22.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:10:32.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:10:42.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:10:52.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:11:02.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:11:12.946: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:11:22.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:11:32.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:11:42.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:11:52.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 5m0.023s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 5m0.001s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 4m59.995s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:12:02.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:12:12.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 5m20.026s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 5m20.003s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 5m19.997s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:12:22.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:12:32.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 5m40.027s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 5m40.005s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 5m39.999s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:12:42.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:12:52.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 6m0.028s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 6m0.006s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 6m0s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:13:02.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:13:12.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 6m20.03s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 6m20.007s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 6m20.001s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:13:22.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:13:32.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 6m40.031s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 6m40.008s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 6m40.002s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:13:42.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:13:52.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 7m0.032s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 7m0.01s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 7m0.003s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [runnable] strings.ToLower({0x6a260d3, 0x4}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/strings/strings.go:625 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).URL(0xc003fa6900) vendor/k8s.io/client-go/rest/request.go:497 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).tryThrottleWithInfo(0xc003fa6900, {0x7f164c6bba20, 0xc001069f80}, {0x0, 0x0}) vendor/k8s.io/client-go/rest/request.go:624 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).tryThrottle(...) vendor/k8s.io/client-go/rest/request.go:641 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc003fa6900, {0x7f164c6bba20, 0xc001069f80}, 0x2?) vendor/k8s.io/client-go/rest/request.go:965 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc003fa6900, {0x7f164c6bba20, 0xc001069f80}) vendor/k8s.io/client-go/rest/request.go:1039 k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/core/v1.(*pods).List(0xc003d8eea0, {0x7f164c6bba20, 0xc001069f80}, {{{0x0, 0x0}, {0x0, 0x0}}, {0xc00407e240, 0x10}, {0x0, ...}, ...}) vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go:99 k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x7f164c6bba20, 0xc001069f80}, {0x7233598, 0xc000550ea0}, 0xc000622500) test/e2e/framework/statefulset/rest.go:68 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1({0x7f164c6bba20?, 0xc001069f80?}) test/e2e/framework/statefulset/wait.go:37 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f164c6bba20?, 0xc001069f80?}, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:149 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:197 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) 
vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:14:02.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:14:12.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 7m20.034s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 7m20.011s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 7m20.005s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc0003d7c80, 0xc005125500) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000ce15f0, 0xc005125500, {0x0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0038bc3c0?}, 0xc005125500?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0038bc3c0, 0xc005125500) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x64fc620?, 0xc004e4be30?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc000d1b120, 0xc005125400) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc005125400, {0x71cb1c0, 0xc000d1b120}, {0x8?, 0x695a000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc003e26f60, 0xc005125400, {0x100?, 0xc000500400?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc003e26f60, 0xc005125400) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0018fcb40, {0x7f164c6bba20, 0xc001069f80}, 0x2?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0018fcb40, {0x7f164c6bba20, 0xc001069f80}) vendor/k8s.io/client-go/rest/request.go:1039 k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/core/v1.(*pods).List(0xc0000d60a0, {0x7f164c6bba20, 0xc001069f80}, {{{0x0, 0x0}, {0x0, 0x0}}, {0xc000d2c2f0, 0x10}, {0x0, ...}, ...}) vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go:99 k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x7f164c6bba20, 0xc001069f80}, {0x7233598, 0xc000550ea0}, 0xc000622500) test/e2e/framework/statefulset/rest.go:68 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1({0x7f164c6bba20?, 0xc001069f80?}) test/e2e/framework/statefulset/wait.go:37 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f164c6bba20?, 0xc001069f80?}, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:149 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:197 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:14:22.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:14:32.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 7m40.036s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 7m40.014s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 7m40.007s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc0003d7c80, 0xc001242200) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000ce15f0, 0xc001242200, {0x0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0038bc3c0?}, 0xc001242200?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0038bc3c0, 0xc001242200) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x64fc620?, 0xc002c8a390?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc000d1b120, 0xc001242100) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc001242100, {0x71cb1c0, 0xc000d1b120}, {0x8?, 0x695a000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc003e26f60, 0xc001242100, {0x100?, 0xc000500400?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc003e26f60, 0xc001242100) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc003c9e000, {0x7f164c6bba20, 0xc001069f80}, 0x2?) 
vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc003c9e000, {0x7f164c6bba20, 0xc001069f80}) vendor/k8s.io/client-go/rest/request.go:1039 k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/core/v1.(*pods).List(0xc000d76020, {0x7f164c6bba20, 0xc001069f80}, {{{0x0, 0x0}, {0x0, 0x0}}, {0xc0029ac040, 0x10}, {0x0, ...}, ...}) vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go:99 k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x7f164c6bba20, 0xc001069f80}, {0x7233598, 0xc000550ea0}, 0xc000622500) test/e2e/framework/statefulset/rest.go:68 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1({0x7f164c6bba20?, 0xc001069f80?}) test/e2e/framework/statefulset/wait.go:37 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f164c6bba20?, 0xc001069f80?}, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:149 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:197 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:14:42.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:14:52.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:15:02.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 8m0.038s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 8m0.015s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 8m0.009s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:15:12.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:15:22.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 8m20.039s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 8m20.017s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 8m20.011s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80}) test/e2e/apps/statefulset.go:712 | | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) | | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:15:32.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Mar 14 11:15:42.939: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 8m40.041s) test/e2e/apps/statefulset.go:701 In [It] (Node Runtime: 8m40.018s) test/e2e/apps/statefulset.go:701 At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 8m40.012s) test/e2e/apps/statefulset.go:711 Spec Goroutine goroutine 1088 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
      test/e2e/framework/statefulset/wait.go:80
    > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80})
        test/e2e/apps/statefulset.go:712
        |
        | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
        > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss)
        |
        | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80})
        vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
        vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
        vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841

Mar 14 11:15:52.944: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Mar 14 11:16:02.945: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Mar 14 11:16:12.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Mar 14 11:16:22.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Mar 14 11:16:32.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Mar 14 11:16:42.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false

[Identical "Automatically polling progress" reports at Spec Runtime 9m0.044s and 9m20.05s, each with the same Spec Goroutine stack, are elided; only the final report is kept below.]

Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 9m40.052s)
  test/e2e/apps/statefulset.go:701
  In [It] (Node Runtime: 9m40.03s)
    test/e2e/apps/statefulset.go:701
    At [By Step] Waiting until all stateful set ss replicas will be running in namespace statefulset-9056 (Step Runtime: 9m40.023s)
      test/e2e/apps/statefulset.go:711

  Spec Goroutine
  goroutine 1088 [select]
    k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc001069f80}, 0xc003d14df8, 0x2b97e2a?)
      vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195
    k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0x0?)
      vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182
    k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc001069f80}, 0x0?, 0x0?, 0xc004a69e80?)
      vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116
    k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x7f164c6bba20?, 0xc001069f80?}, {0x7233598?, 0xc000550ea0?}, 0x0?, 0x0?, 0x6ae6192?)
      test/e2e/framework/statefulset/wait.go:35
    k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...)
      test/e2e/framework/statefulset/wait.go:80
    > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11({0x7f164c6bba20, 0xc001069f80})
        test/e2e/apps/statefulset.go:712
        |
        | ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
        > e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss)
        |
        | ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc001069f80})
        vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
        vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
        vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841

Mar 14 11:16:52.937: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Mar 14 11:17:02.938: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Mar 14 11:17:02.941: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
[FAILED] Failed waiting for pods to enter running: timed out waiting for the condition
In [It] at: test/e2e/framework/statefulset/wait.go:58 @ 03/14/23 11:17:02.942
< Exit [It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] - test/e2e/apps/statefulset.go:701 @ 03/14/23 11:17:02.942 (10m0.018s)
> Enter [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:126 @ 03/14/23 11:17:02.942
Mar 14 11:17:02.945: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=statefulset-9056 describe po ss-0'
Mar 14 11:17:03.052: INFO: stderr: ""
Mar 14 11:17:03.052: INFO: Output of kubectl describe ss-0:
Name:             ss-0
Namespace:        statefulset-9056
Priority:         0
Service Account:  default
Node:             172.17.0.1/172.17.0.1
Start Time:       Tue, 14 Mar 2023 11:07:02 +0000
Labels:           baz=blah
                  controller-revision-hash=ss-7b6c9599d5
                  foo=bar
                  statefulset.kubernetes.io/pod-name=ss-0
Annotations:      <none>
Status:           Running
IP:               10.88.1.105
IPs:
  IP:  10.88.1.105
  IP:  2001:4860:4860::169
Controlled By:  StatefulSet/ss
Containers:
  webserver:
    Container ID:   containerd://87b64a1918421f1afe9cf2b04aee64926fe5ab1e73c2155d4a12c95c9513a3f8
    Image:          registry.k8s.io/e2e-test-images/httpd:2.4.38-4
    Image ID:       registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22
    Port:           <none>
    Host Port:      <none>
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Error
      Exit Code:    137
      Started:      Tue, 14 Mar 2023 11:12:55 +0000
      Finished:     Tue, 14 Mar 2023 11:12:57 +0000
    Ready:          False
    Restart Count:  6
    Readiness:      http-get http://:80/index.html delay=0s timeout=1s period=1s #success=1 #failure=1
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-ws8pk (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  kube-api-access-ws8pk:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason                  Age                     From               Message
  ----     ------                  ----                    ----               -------
  Normal   Scheduled               10m                     default-scheduler  Successfully assigned statefulset-9056/ss-0 to 172.17.0.1
  Warning  FailedCreatePodSandBox  9m58s                   kubelet            Failed to create pod sandbox: rpc error: code = Unknown desc = failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: error during container init: %!w(<nil>): unknown
  Normal   Pulling                 9m45s                   kubelet            Pulling image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4"
  Normal   Pulled                  9m42s                   kubelet            Successfully pulled image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" in 3.134795373s (3.134801912s including waiting)
  Warning  Failed                  9m42s                   kubelet            Error: failed to get sandbox container task: no running task found: task 007d1431f22e79224e3cf93ec1a4d7d94f90db78745f1852447d67666810decb not found: not found
  Warning  Failed                  9m38s                   kubelet            Error: failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: can't get final child's PID from pipe: EOF: unknown
  Warning  Unhealthy               9m34s                   kubelet            Readiness probe failed: Get "http://10.88.0.76:80/index.html": dial tcp 10.88.0.76:80: connect: connection refused
  Warning  BackOff                 9m27s (x4 over 9m31s)   kubelet            Back-off restarting failed container webserver in pod ss-0_statefulset-9056(4c1cff45-101b-4eb6-8b33-2f2f98d7e171)
  Normal   Pulled                  9m23s (x3 over 9m38s)   kubelet            Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine
  Normal   Started                 9m23s (x2 over 9m36s)   kubelet            Started container webserver
  Normal   Created                 9m23s (x3 over 9m38s)   kubelet            Created container webserver
  Warning  Unhealthy               9m22s                   kubelet            Readiness probe failed: Get "http://10.88.0.82:80/index.html": dial tcp 10.88.0.82:80: connect: connection refused
  Normal   SandboxChanged          4m58s (x73 over 9m41s)  kubelet            Pod sandbox changed, it will be killed and re-created.
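The describe output above explains the Ready=false churn: the webserver container is probed with no initial delay, a 1s period, and a failure threshold of 1, so a single refused GET is enough to mark the pod NotReady. As a reference point, here is a minimal sketch of how a probe with exactly these values is expressed with the client-go API types; the values are copied from the Readiness line above, while the surrounding program is illustrative only, not the test's source:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Mirrors the probe reported by kubectl describe:
	// http-get http://:80/index.html delay=0s timeout=1s period=1s #success=1 #failure=1
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/index.html",
				Port: intstr.FromInt(80),
			},
		},
		InitialDelaySeconds: 0, // probe as soon as the container starts
		TimeoutSeconds:      1,
		PeriodSeconds:       1, // probe every second
		SuccessThreshold:    1,
		FailureThreshold:    1, // one failed GET flips the pod to NotReady
	}
	fmt.Printf("%+v\n", probe)
}

With a failure threshold this tight, any sandbox restart (as in the SandboxChanged events above) immediately shows up as a readiness flap.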
Mar 14 11:17:03.052: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=statefulset-9056 logs ss-0 --tail=100'
Mar 14 11:17:03.153: INFO: stderr: ""
Mar 14 11:17:03.153: INFO: Last 100 log lines of ss-0:
[Tue Mar 14 11:12:55.716956 2023] [mpm_event:notice] [pid 1:tid 140595298130792] AH00489: Apache/2.4.38 (Unix) configured -- resuming normal operations
[Tue Mar 14 11:12:55.717025 2023] [core:notice] [pid 1:tid 140595298130792] AH00094: Command line: 'httpd -D FOREGROUND'
10.88.0.1 - - [14/Mar/2023:11:12:56 +0000] "GET /index.html HTTP/1.1" 200 45
Mar 14 11:17:03.153: INFO: Deleting all statefulset in ns statefulset-9056
Mar 14 11:17:03.157: INFO: Scaling statefulset ss to 0
Mar 14 11:17:13.180: INFO: Waiting for statefulset status.replicas updated to 0
Mar 14 11:17:13.183: INFO: Deleting statefulset ss
< Exit [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:126 @ 03/14/23 11:17:13.193 (10.252s)
> Enter [AfterEach] [sig-apps] StatefulSet - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:17:13.193
Mar 14 11:17:13.193: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
< Exit [AfterEach] [sig-apps] StatefulSet - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:17:13.197 (4ms)
> Enter [DeferCleanup (Each)] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:17:13.197
< Exit [DeferCleanup (Each)] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:17:13.197 (0s)
> Enter [DeferCleanup (Each)] [sig-apps] StatefulSet - dump namespaces | framework.go:209 @ 03/14/23 11:17:13.197
STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:17:13.197
STEP: Collecting events from namespace "statefulset-9056". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:17:13.197
STEP: Found 15 events. - test/e2e/framework/debug/dump.go:46 @ 03/14/23 11:17:13.2
Mar 14 11:17:13.200: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for ss-0: { } Scheduled: Successfully assigned statefulset-9056/ss-0 to 172.17.0.1
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:02 +0000 UTC - event for ss: {statefulset-controller } SuccessfulCreate: create Pod ss-0 in StatefulSet ss successful
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:05 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: error during container init: %!w(<nil>): unknown
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:18 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} Pulling: Pulling image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4"
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:21 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} Pulled: Successfully pulled image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" in 3.134795373s (3.134801912s including waiting)
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:21 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} Failed: Error: failed to get sandbox container task: no running task found: task 007d1431f22e79224e3cf93ec1a4d7d94f90db78745f1852447d67666810decb not found: not found
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:22 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:25 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:25 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} Failed: Error: failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: can't get final child's PID from pipe: EOF: unknown
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:25 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} Created: Created container webserver
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:27 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} Started: Started container webserver
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:29 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} Unhealthy: Readiness probe failed: Get "http://10.88.0.76:80/index.html": dial tcp 10.88.0.76:80: connect: connection refused
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:32 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container webserver in pod ss-0_statefulset-9056(4c1cff45-101b-4eb6-8b33-2f2f98d7e171)
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:07:41 +0000 UTC - event for ss-0: {kubelet 172.17.0.1} Unhealthy: Readiness probe failed: Get "http://10.88.0.82:80/index.html": dial tcp 10.88.0.82:80: connect: connection refused
Mar 14 11:17:13.200: INFO: At 2023-03-14 11:17:03 +0000 UTC - event for ss: {statefulset-controller } SuccessfulDelete: delete Pod ss-0 in StatefulSet ss successful
Mar 14 11:17:13.203: INFO: POD NODE PHASE GRACE CONDITIONS
Mar 14 11:17:13.203: INFO:
Mar 14 11:17:13.205: INFO: Logging node info for node 172.17.0.1
Mar 14 11:17:13.208: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 1618 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64
beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:12:47 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:12:47 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:12:47 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:12:47 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:12:47 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 14 11:17:13.209: INFO: Logging kubelet events for node 172.17.0.1 Mar 14 11:17:13.212: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 14 11:17:13.218: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded) Mar 14 11:17:13.218: INFO: Container coredns ready: false, restart count 6 Mar 14 11:17:13.250: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:17:13.25 (53ms) < Exit [DeferCleanup (Each)] [sig-apps] StatefulSet - dump namespaces | framework.go:209 @ 03/14/23 11:17:13.25 (53ms) > Enter [DeferCleanup (Each)] [sig-apps] StatefulSet - tear down framework | framework.go:206 @ 03/14/23 11:17:13.25 STEP: Destroying namespace "statefulset-9056" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:17:13.25 < Exit [DeferCleanup (Each)] [sig-apps] StatefulSet - tear down framework | framework.go:206 @ 03/14/23 11:17:13.257 (7ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:17:13.257 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:17:13.257 (0s)
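The failure above follows the pattern visible in the goroutine dump: e2estatefulset.WaitForRunningAndReady drives wait.PollImmediateWithContext, re-listing pods until every replica is Running with Ready=True or the 10m budget expires. The sketch below is a simplified stand-in for that helper, not the framework's actual code; waitForRunningAndReady, the label selector, and the replica count are illustrative, with the namespace taken from the log:

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForRunningAndReady polls every 10s until `want` pods matching
// `selector` are Running with a Ready=True condition, or `timeout` expires.
func waitForRunningAndReady(ctx context.Context, c kubernetes.Interface, ns, selector string, want int, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
		pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			return false, err // a List error aborts the wait early
		}
		ready := 0
		for _, p := range pods.Items {
			if p.Status.Phase != corev1.PodRunning {
				continue
			}
			for _, cond := range p.Status.Conditions {
				if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
					ready++
				}
			}
		}
		// Mirrors the "Waiting for pod ... to enter Running - Ready=true" lines above.
		fmt.Printf("%d/%d pods Running and Ready\n", ready, want)
		return ready >= want, nil
	})
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	c := kubernetes.NewForConfigOrDie(cfg)
	// Namespace and label come from the failed run; the replica count is a placeholder.
	if err := waitForRunningAndReady(context.Background(), c, "statefulset-9056", "foo=bar", 1, 10*time.Minute); err != nil {
		fmt.Println("wait failed:", err)
	}
}

Note that with this shape of loop, a pod stuck in CrashLoopBackOff (as ss-0 was) never satisfies the condition, so the wait always consumes the full timeout before failing.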
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\sclient\sUpdate\sDemo\sshould\sscale\sa\sreplication\scontroller\s\s\[Conformance\]$'
[FAILED] Timed out after 300 seconds waiting for name=update-demo pods to reach valid state
In [It] at: test/e2e/kubectl/kubectl.go:2350 @ 03/14/23 11:41:33.397
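The Update Demo test's poll loop, shown in the log below, shells out to kubectl with a go-template that prints "true" only when the pod has a containerStatus named update-demo whose state contains a running entry. For reference, the same predicate expressed directly against the API with client-go; containerRunning is a hypothetical helper, while the pod and namespace names are the ones from this run:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// containerRunning reports whether the named container in the pod currently
// has a non-nil State.Running -- the same predicate as the kubectl template
// {{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}
func containerRunning(ctx context.Context, c kubernetes.Interface, ns, pod, container string) (bool, error) {
	p, err := c.CoreV1().Pods(ns).Get(ctx, pod, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, cs := range p.Status.ContainerStatuses {
		if cs.Name == container && cs.State.Running != nil {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	c := kubernetes.NewForConfigOrDie(cfg)
	// Pod and namespace names are taken from the log below.
	ok, err := containerRunning(context.Background(), c, "kubectl-4665", "update-demo-nautilus-cpskq", "update-demo")
	fmt.Println(ok, err)
}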
> Enter [BeforeEach] [sig-cli] Kubectl client - set up framework | framework.go:191 @ 03/14/23 11:36:32.1
STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:36:32.1
Mar 14 11:36:32.100: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename kubectl - test/e2e/framework/framework.go:250 @ 03/14/23 11:36:32.101
STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:36:32.114
STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:36:32.118
< Exit [BeforeEach] [sig-cli] Kubectl client - set up framework | framework.go:191 @ 03/14/23 11:36:32.121 (21ms)
> Enter [BeforeEach] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:36:32.121
< Exit [BeforeEach] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:36:32.122 (0s)
> Enter [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:273 @ 03/14/23 11:36:32.122
< Exit [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:273 @ 03/14/23 11:36:32.122 (0s)
> Enter [BeforeEach] Update Demo - test/e2e/kubectl/kubectl.go:325 @ 03/14/23 11:36:32.122
< Exit [BeforeEach] Update Demo - test/e2e/kubectl/kubectl.go:325 @ 03/14/23 11:36:32.122 (0s)
> Enter [It] should scale a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:351 @ 03/14/23 11:36:32.122
STEP: creating a replication controller - test/e2e/kubectl/kubectl.go:354 @ 03/14/23 11:36:32.122
Mar 14 11:36:32.122: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 create -f -'
Mar 14 11:36:32.930: INFO: stderr: ""
Mar 14 11:36:32.930: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
STEP: waiting for all containers in name=update-demo pods to come up. - test/e2e/kubectl/kubectl.go:2310 @ 03/14/23 11:36:32.93
Mar 14 11:36:32.930: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
Mar 14 11:36:33.016: INFO: stderr: ""
Mar 14 11:36:33.016: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw "
Mar 14 11:36:33.016: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists .
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:36:33.094: INFO: stderr: "" Mar 14 11:36:33.094: INFO: stdout: "" Mar 14 11:36:33.094: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:36:38.096: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:36:38.210: INFO: stderr: "" Mar 14 11:36:38.210: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:36:38.210: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:36:38.319: INFO: stderr: "" Mar 14 11:36:38.319: INFO: stdout: "" Mar 14 11:36:38.319: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:36:43.319: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:36:43.408: INFO: stderr: "" Mar 14 11:36:43.408: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:36:43.408: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:36:43.504: INFO: stderr: "" Mar 14 11:36:43.504: INFO: stdout: "" Mar 14 11:36:43.504: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:36:48.505: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:36:48.588: INFO: stderr: "" Mar 14 11:36:48.588: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:36:48.588: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:36:48.664: INFO: stderr: "" Mar 14 11:36:48.664: INFO: stdout: "" Mar 14 11:36:48.664: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:36:53.665: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:36:53.760: INFO: stderr: "" Mar 14 11:36:53.760: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:36:53.760: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:36:53.839: INFO: stderr: "" Mar 14 11:36:53.839: INFO: stdout: "" Mar 14 11:36:53.839: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:36:58.840: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:36:58.925: INFO: stderr: "" Mar 14 11:36:58.925: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:36:58.925: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:36:59.007: INFO: stderr: "" Mar 14 11:36:59.007: INFO: stdout: "" Mar 14 11:36:59.007: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:04.007: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:04.091: INFO: stderr: "" Mar 14 11:37:04.091: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:04.091: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:04.172: INFO: stderr: "" Mar 14 11:37:04.172: INFO: stdout: "" Mar 14 11:37:04.172: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:09.173: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:09.268: INFO: stderr: "" Mar 14 11:37:09.268: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:09.268: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:09.355: INFO: stderr: "" Mar 14 11:37:09.355: INFO: stdout: "" Mar 14 11:37:09.355: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:14.356: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:14.441: INFO: stderr: "" Mar 14 11:37:14.441: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:14.441: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:14.522: INFO: stderr: "" Mar 14 11:37:14.522: INFO: stdout: "" Mar 14 11:37:14.522: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:19.523: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:19.603: INFO: stderr: "" Mar 14 11:37:19.603: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:19.603: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:19.690: INFO: stderr: "" Mar 14 11:37:19.690: INFO: stdout: "" Mar 14 11:37:19.690: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:24.690: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:24.772: INFO: stderr: "" Mar 14 11:37:24.772: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:24.772: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:24.848: INFO: stderr: "" Mar 14 11:37:24.848: INFO: stdout: "" Mar 14 11:37:24.848: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:29.849: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:29.944: INFO: stderr: "" Mar 14 11:37:29.944: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:29.944: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:30.033: INFO: stderr: "" Mar 14 11:37:30.033: INFO: stdout: "" Mar 14 11:37:30.033: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:35.034: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:35.120: INFO: stderr: "" Mar 14 11:37:35.120: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:35.120: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:35.205: INFO: stderr: "" Mar 14 11:37:35.205: INFO: stdout: "" Mar 14 11:37:35.205: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:40.206: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:40.293: INFO: stderr: "" Mar 14 11:37:40.293: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:40.293: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:40.379: INFO: stderr: "" Mar 14 11:37:40.379: INFO: stdout: "" Mar 14 11:37:40.379: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:45.380: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:45.465: INFO: stderr: "" Mar 14 11:37:45.465: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:45.465: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:45.550: INFO: stderr: "" Mar 14 11:37:45.550: INFO: stdout: "" Mar 14 11:37:45.550: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:37:50.551: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:50.633: INFO: stderr: "" Mar 14 11:37:50.633: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:50.633: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:50.707: INFO: stderr: "" Mar 14 11:37:50.707: INFO: stdout: "true" Mar 14 11:37:50.707: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Mar 14 11:37:50.783: INFO: stderr: "" Mar 14 11:37:50.783: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Mar 14 11:37:50.783: INFO: validating pod update-demo-nautilus-cpskq Mar 14 11:37:50.787: INFO: got data: { "image": "nautilus.jpg" } Mar 14 11:37:50.787: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Mar 14 11:37:50.787: INFO: update-demo-nautilus-cpskq is verified up and running Mar 14 11:37:50.787: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-rzmgw -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:50.864: INFO: stderr: "" Mar 14 11:37:50.864: INFO: stdout: "" Mar 14 11:37:50.864: INFO: update-demo-nautilus-rzmgw is created but not running Mar 14 11:37:55.865: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:37:55.958: INFO: stderr: "" Mar 14 11:37:55.958: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:37:55.958: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:37:56.051: INFO: stderr: "" Mar 14 11:37:56.051: INFO: stdout: "" Mar 14 11:37:56.051: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:01.052: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:01.137: INFO: stderr: "" Mar 14 11:38:01.137: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:01.137: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:01.221: INFO: stderr: "" Mar 14 11:38:01.221: INFO: stdout: "" Mar 14 11:38:01.221: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:06.222: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:06.322: INFO: stderr: "" Mar 14 11:38:06.322: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:06.322: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:06.421: INFO: stderr: "" Mar 14 11:38:06.421: INFO: stdout: "" Mar 14 11:38:06.421: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:11.422: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:11.512: INFO: stderr: "" Mar 14 11:38:11.512: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:11.512: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:11.600: INFO: stderr: "" Mar 14 11:38:11.600: INFO: stdout: "" Mar 14 11:38:11.600: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:16.600: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:16.685: INFO: stderr: "" Mar 14 11:38:16.685: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:16.685: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:16.769: INFO: stderr: "" Mar 14 11:38:16.769: INFO: stdout: "" Mar 14 11:38:16.769: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:21.770: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:21.852: INFO: stderr: "" Mar 14 11:38:21.852: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:21.852: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:21.931: INFO: stderr: "" Mar 14 11:38:21.931: INFO: stdout: "" Mar 14 11:38:21.931: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:26.932: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:27.023: INFO: stderr: "" Mar 14 11:38:27.023: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:27.023: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:27.109: INFO: stderr: "" Mar 14 11:38:27.109: INFO: stdout: "" Mar 14 11:38:27.109: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:32.110: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:32.192: INFO: stderr: "" Mar 14 11:38:32.192: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:32.192: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:32.271: INFO: stderr: "" Mar 14 11:38:32.271: INFO: stdout: "" Mar 14 11:38:32.271: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:37.272: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:37.374: INFO: stderr: "" Mar 14 11:38:37.374: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:37.374: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:37.467: INFO: stderr: "" Mar 14 11:38:37.467: INFO: stdout: "" Mar 14 11:38:37.467: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:42.468: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:42.554: INFO: stderr: "" Mar 14 11:38:42.554: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:42.554: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:42.633: INFO: stderr: "" Mar 14 11:38:42.633: INFO: stdout: "" Mar 14 11:38:42.633: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:47.634: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:47.734: INFO: stderr: "" Mar 14 11:38:47.734: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:47.734: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:47.816: INFO: stderr: "" Mar 14 11:38:47.816: INFO: stdout: "" Mar 14 11:38:47.816: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:52.817: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:52.900: INFO: stderr: "" Mar 14 11:38:52.900: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:52.900: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:52.978: INFO: stderr: "" Mar 14 11:38:52.978: INFO: stdout: "" Mar 14 11:38:52.978: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:38:57.979: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:38:58.077: INFO: stderr: "" Mar 14 11:38:58.077: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:38:58.077: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:38:58.169: INFO: stderr: "" Mar 14 11:38:58.169: INFO: stdout: "" Mar 14 11:38:58.169: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:03.171: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:03.252: INFO: stderr: "" Mar 14 11:39:03.252: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:03.252: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:03.330: INFO: stderr: "" Mar 14 11:39:03.330: INFO: stdout: "" Mar 14 11:39:03.330: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:08.331: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:08.431: INFO: stderr: "" Mar 14 11:39:08.431: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:08.431: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:08.508: INFO: stderr: "" Mar 14 11:39:08.508: INFO: stdout: "" Mar 14 11:39:08.508: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:13.509: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:13.593: INFO: stderr: "" Mar 14 11:39:13.593: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:13.593: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:13.669: INFO: stderr: "" Mar 14 11:39:13.669: INFO: stdout: "" Mar 14 11:39:13.669: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:18.669: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:18.749: INFO: stderr: "" Mar 14 11:39:18.749: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:18.749: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:18.826: INFO: stderr: "" Mar 14 11:39:18.826: INFO: stdout: "" Mar 14 11:39:18.826: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:23.826: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:23.914: INFO: stderr: "" Mar 14 11:39:23.914: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:23.914: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:24.023: INFO: stderr: "" Mar 14 11:39:24.023: INFO: stdout: "" Mar 14 11:39:24.023: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:29.023: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:29.108: INFO: stderr: "" Mar 14 11:39:29.108: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:29.108: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:29.192: INFO: stderr: "" Mar 14 11:39:29.193: INFO: stdout: "" Mar 14 11:39:29.193: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:34.194: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:34.296: INFO: stderr: "" Mar 14 11:39:34.296: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:34.296: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:34.387: INFO: stderr: "" Mar 14 11:39:34.387: INFO: stdout: "" Mar 14 11:39:34.387: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:39.387: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:39.472: INFO: stderr: "" Mar 14 11:39:39.472: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:39.473: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:39.554: INFO: stderr: "" Mar 14 11:39:39.554: INFO: stdout: "" Mar 14 11:39:39.554: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:44.555: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:44.636: INFO: stderr: "" Mar 14 11:39:44.636: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:44.636: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:44.712: INFO: stderr: "" Mar 14 11:39:44.712: INFO: stdout: "" Mar 14 11:39:44.712: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:49.713: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:49.795: INFO: stderr: "" Mar 14 11:39:49.795: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:49.795: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:49.871: INFO: stderr: "" Mar 14 11:39:49.871: INFO: stdout: "" Mar 14 11:39:49.871: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:39:54.872: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:39:54.959: INFO: stderr: "" Mar 14 11:39:54.959: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:39:54.959: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:39:55.045: INFO: stderr: "" Mar 14 11:39:55.045: INFO: stdout: "" Mar 14 11:39:55.045: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:00.047: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:00.130: INFO: stderr: "" Mar 14 11:40:00.130: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:00.130: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:00.207: INFO: stderr: "" Mar 14 11:40:00.207: INFO: stdout: "" Mar 14 11:40:00.207: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:05.208: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:05.294: INFO: stderr: "" Mar 14 11:40:05.294: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:05.294: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:05.372: INFO: stderr: "" Mar 14 11:40:05.372: INFO: stdout: "" Mar 14 11:40:05.372: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:10.373: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:10.464: INFO: stderr: "" Mar 14 11:40:10.464: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:10.464: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:10.546: INFO: stderr: "" Mar 14 11:40:10.546: INFO: stdout: "" Mar 14 11:40:10.546: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:15.546: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:15.631: INFO: stderr: "" Mar 14 11:40:15.631: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:15.631: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:15.708: INFO: stderr: "" Mar 14 11:40:15.708: INFO: stdout: "" Mar 14 11:40:15.708: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:20.709: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:20.792: INFO: stderr: "" Mar 14 11:40:20.792: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:20.792: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:20.870: INFO: stderr: "" Mar 14 11:40:20.870: INFO: stdout: "" Mar 14 11:40:20.870: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:25.870: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:25.950: INFO: stderr: "" Mar 14 11:40:25.950: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:25.950: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:26.027: INFO: stderr: "" Mar 14 11:40:26.027: INFO: stdout: "" Mar 14 11:40:26.027: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:31.028: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:31.112: INFO: stderr: "" Mar 14 11:40:31.112: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:31.112: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:31.195: INFO: stderr: "" Mar 14 11:40:31.195: INFO: stdout: "" Mar 14 11:40:31.195: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:36.195: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:36.280: INFO: stderr: "" Mar 14 11:40:36.280: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:36.280: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:36.370: INFO: stderr: "" Mar 14 11:40:36.370: INFO: stdout: "" Mar 14 11:40:36.370: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:41.372: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:41.455: INFO: stderr: "" Mar 14 11:40:41.455: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:41.455: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:41.536: INFO: stderr: "" Mar 14 11:40:41.536: INFO: stdout: "" Mar 14 11:40:41.536: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:46.537: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:46.622: INFO: stderr: "" Mar 14 11:40:46.622: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:46.623: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:46.705: INFO: stderr: "" Mar 14 11:40:46.705: INFO: stdout: "" Mar 14 11:40:46.705: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:51.706: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:51.788: INFO: stderr: "" Mar 14 11:40:51.788: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:51.788: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:51.871: INFO: stderr: "" Mar 14 11:40:51.871: INFO: stdout: "" Mar 14 11:40:51.871: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:40:56.871: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:40:56.966: INFO: stderr: "" Mar 14 11:40:56.966: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:40:56.966: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:40:57.053: INFO: stderr: "" Mar 14 11:40:57.053: INFO: stdout: "" Mar 14 11:40:57.053: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:41:02.053: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:41:02.142: INFO: stderr: "" Mar 14 11:41:02.142: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:41:02.142: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:41:02.240: INFO: stderr: "" Mar 14 11:41:02.240: INFO: stdout: "" Mar 14 11:41:02.240: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:41:07.241: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:41:07.334: INFO: stderr: "" Mar 14 11:41:07.334: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:41:07.334: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:41:07.423: INFO: stderr: "" Mar 14 11:41:07.423: INFO: stdout: "" Mar 14 11:41:07.423: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:41:12.423: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:41:12.515: INFO: stderr: "" Mar 14 11:41:12.515: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:41:12.515: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:41:12.597: INFO: stderr: "" Mar 14 11:41:12.597: INFO: stdout: "" Mar 14 11:41:12.597: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:41:17.598: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:41:17.689: INFO: stderr: "" Mar 14 11:41:17.689: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:41:17.689: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:41:17.769: INFO: stderr: "" Mar 14 11:41:17.769: INFO: stdout: "" Mar 14 11:41:17.769: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:41:22.770: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:41:22.851: INFO: stderr: "" Mar 14 11:41:22.851: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:41:22.851: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:41:22.932: INFO: stderr: "" Mar 14 11:41:22.932: INFO: stdout: "" Mar 14 11:41:22.932: INFO: update-demo-nautilus-cpskq is created but not running Mar 14 11:41:27.933: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Mar 14 11:41:28.019: INFO: stderr: "" Mar 14 11:41:28.019: INFO: stdout: "update-demo-nautilus-cpskq update-demo-nautilus-rzmgw " Mar 14 11:41:28.019: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods update-demo-nautilus-cpskq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Mar 14 11:41:28.110: INFO: stderr: "" Mar 14 11:41:28.110: INFO: stdout: "" Mar 14 11:41:28.110: INFO: update-demo-nautilus-cpskq is created but not running Automatically polling progress: [sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance] (Spec Runtime: 5m0.022s) test/e2e/kubectl/kubectl.go:351 In [It] (Node Runtime: 5m0s) test/e2e/kubectl/kubectl.go:351 At [By Step] waiting for all containers in name=update-demo pods to come up. (Step Runtime: 4m59.192s) test/e2e/kubectl/kubectl.go:2310 Spec Goroutine goroutine 2815 [sleep] time.Sleep(0x12a05f200) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/kubectl.validateController({0x7f164c6bba20, 0xc005461740}, {0x7233598, 0xc0037081a0}, {0xc0005b8900?, 0x220dcd7?}, 0x2, {0x6a3cd41, 0xb}, {0x6a5209b, ...}, ...) 
    test/e2e/kubectl/kubectl.go:2312
    | ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
    | waitLoop:
    > for start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
    |   getPodsOutput := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
    |   pods := strings.Fields(getPodsOutput)
> k8s.io/kubernetes/test/e2e/kubectl.glob..func1.6.3({0x7f164c6bba20, 0xc005461740})
    test/e2e/kubectl/kubectl.go:356
    | ginkgo.By("creating a replication controller")
    | e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
    > validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
    | ginkgo.By("scaling down the replication controller")
    | debugDiscovery()
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc005461740})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841
STEP: using delete to clean up resources - test/e2e/kubectl/kubectl.go:197 @ 03/14/23 11:41:33.11
Mar 14 11:41:33.110: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 delete --grace-period=0 --force -f -'
Mar 14 11:41:33.204: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
Mar 14 11:41:33.204: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
Mar 14 11:41:33.204: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get rc,svc -l name=update-demo --no-headers'
Mar 14 11:41:33.300: INFO: stderr: "No resources found in kubectl-4665 namespace.\n"
Mar 14 11:41:33.300: INFO: stdout: ""
Mar 14 11:41:33.300: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=kubectl-4665 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
Mar 14 11:41:33.397: INFO: stderr: ""
Mar 14 11:41:33.397: INFO: stdout: ""
[FAILED] Timed out after 300 seconds waiting for name=update-demo pods to reach valid state
In [It] at: test/e2e/kubectl/kubectl.go:2350 @ 03/14/23 11:41:33.397
< Exit [It] should scale a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:351 @ 03/14/23 11:41:33.397 (5m1.275s)
> Enter [AfterEach] [sig-cli] Kubectl client - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:41:33.397
Mar 14 11:41:33.397: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
< Exit [AfterEach] [sig-cli] Kubectl client - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:41:33.411 (14ms)
> Enter [DeferCleanup (Each)] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:41:33.411
< Exit [DeferCleanup (Each)] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:41:33.411 (0s)
> Enter [DeferCleanup (Each)] [sig-cli] Kubectl client - dump namespaces | framework.go:209 @ 03/14/23 11:41:33.411
STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:41:33.411
STEP: Collecting events from namespace "kubectl-4665". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:41:33.411
STEP: Found 22 events. - test/e2e/framework/debug/dump.go:46 @ 03/14/23 11:41:33.415
Mar 14 11:41:33.415: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for update-demo-nautilus-cpskq: { } Scheduled: Successfully assigned kubectl-4665/update-demo-nautilus-cpskq to 172.17.0.1
Mar 14 11:41:33.415: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for update-demo-nautilus-rzmgw: { } Scheduled: Successfully assigned kubectl-4665/update-demo-nautilus-rzmgw to 172.17.0.1
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:32 +0000 UTC - event for update-demo-nautilus: {replication-controller } SuccessfulCreate: Created pod: update-demo-nautilus-cpskq
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:32 +0000 UTC - event for update-demo-nautilus: {replication-controller } SuccessfulCreate: Created pod: update-demo-nautilus-rzmgw
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:35 +0000 UTC - event for update-demo-nautilus-cpskq: {kubelet 172.17.0.1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: error during container init: %!w(<nil>): unknown
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:35 +0000 UTC - event for update-demo-nautilus-rzmgw: {kubelet 172.17.0.1} Pulling: Pulling image "registry.k8s.io/e2e-test-images/nautilus:1.7"
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:38 +0000 UTC - event for update-demo-nautilus-rzmgw: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:38 +0000 UTC - event for update-demo-nautilus-rzmgw: {kubelet 172.17.0.1} Failed: Error: failed to get sandbox container task: no running task found: task 12be46c05fbd53e4f4eec3a81a625dcb78b67f84939b48b5365de8b8bb486356 not found: not found
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:38 +0000 UTC - event for update-demo-nautilus-rzmgw: {kubelet 172.17.0.1} Pulled: Successfully pulled image "registry.k8s.io/e2e-test-images/nautilus:1.7" in 2.560050789s (2.56007014s including waiting)
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:39 +0000 UTC - event for update-demo-nautilus-rzmgw: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/nautilus:1.7" already present on machine
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:39 +0000 UTC - event for update-demo-nautilus-rzmgw: {kubelet 172.17.0.1} Created: Created container update-demo
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:40 +0000 UTC - event for update-demo-nautilus-rzmgw: {kubelet 172.17.0.1} Started: Started container update-demo
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:43 +0000 UTC - event for update-demo-nautilus-rzmgw: {kubelet 172.17.0.1} Failed: Error: failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: can't get final child's PID from pipe: EOF: unknown
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:44 +0000 UTC - event for update-demo-nautilus-rzmgw: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container update-demo in pod update-demo-nautilus-rzmgw_kubectl-4665(97dba548-6936-48ae-a9aa-70f4dfdaefc8)
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:36:47 +0000 UTC - event for update-demo-nautilus-cpskq: {kubelet 172.17.0.1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container task "a5aa6c150ef57a29cba259a241eec3bbf7f562e0ab0c65cdfb0dc8abff427166": OCI runtime start failed: cannot start a container that has stopped: unknown
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:37:00 +0000 UTC - event for update-demo-nautilus-cpskq: {kubelet 172.17.0.1} Started: Started container update-demo
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:37:00 +0000 UTC - event for update-demo-nautilus-cpskq: {kubelet 172.17.0.1} Created: Created container update-demo
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:37:00 +0000 UTC - event for update-demo-nautilus-cpskq: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/nautilus:1.7" already present on machine
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:37:01 +0000 UTC - event for update-demo-nautilus-cpskq: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:37:03 +0000 UTC - event for update-demo-nautilus-cpskq: {kubelet 172.17.0.1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container task "8900c30dd2e51900d9c68f77f9046327021f08d5f5d0161ca63039f69507dfac": OCI runtime start failed: container process is already dead: unknown
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:37:09 +0000 UTC - event for update-demo-nautilus-cpskq: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container update-demo in pod update-demo-nautilus-cpskq_kubectl-4665(da8e0399-e1d3-48e5-803b-d32071bfc9af)
Mar 14 11:41:33.415: INFO: At 2023-03-14 11:37:13 +0000 UTC - event for update-demo-nautilus-cpskq: {kubelet 172.17.0.1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container task "0aec10afcc05118d99dfd4108b7427ab8a4b211c434ee3062029c03d00b10565": OCI runtime start failed: cannot start a container that has stopped: unknown
Mar 14 11:41:33.419: INFO: POD                         NODE        PHASE    GRACE  CONDITIONS
Mar 14 11:41:33.419: INFO: update-demo-nautilus-cpskq  172.17.0.1  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:36:32 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:38:33 +0000 UTC ContainersNotReady containers with unready status: [update-demo]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:38:33 +0000 UTC ContainersNotReady containers with unready status: [update-demo]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:36:32 +0000 UTC }]
Mar 14 11:41:33.419: INFO: update-demo-nautilus-rzmgw  172.17.0.1  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:36:32 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:39:25 +0000 UTC ContainersNotReady containers with unready status: [update-demo]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:39:25 +0000 UTC ContainersNotReady containers with unready status: [update-demo]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:36:32 +0000 UTC }]
Mar 14 11:41:33.419: INFO:
Mar 14 11:41:33.450: INFO: Logging node info for node 172.17.0.1
Mar 14 11:41:33.454: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 4684 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:37:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}}
status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:37:04 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:37:04 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:37:04 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:37:04 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 
registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}
Mar 14 11:41:33.455: INFO: Logging kubelet events for node 172.17.0.1
Mar 14 11:41:33.462: INFO: Logging pods the kubelet thinks is on node 172.17.0.1
Mar 14 11:41:33.470: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:41:33.470: INFO: Container coredns ready: false, restart count 11
Mar 14 11:41:33.470: INFO: update-demo-nautilus-rzmgw started at 2023-03-14 11:36:32 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:41:33.470: INFO: Container update-demo ready: false, restart count 5
Mar 14 11:41:33.470: INFO: update-demo-nautilus-cpskq started at 2023-03-14 11:36:32 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:41:33.470: INFO: Container update-demo ready: false, restart count 5
Mar 14 11:41:33.510: INFO: Latency metrics for node 172.17.0.1
END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:41:33.51 (99ms)
< Exit [DeferCleanup (Each)] [sig-cli] Kubectl client - dump namespaces | framework.go:209 @ 03/14/23 11:41:33.51 (99ms)
> Enter [DeferCleanup (Each)] [sig-cli] Kubectl client - tear down framework | framework.go:206 @ 03/14/23 11:41:33.51
STEP: Destroying namespace "kubectl-4665" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:41:33.51
< Exit [DeferCleanup (Each)] [sig-cli] Kubectl client - tear down framework | framework.go:206 @ 03/14/23 11:41:33.518 (8ms)
> Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:41:33.519
< Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:41:33.519 (0s)
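For context on the timeout above: the stuck goroutine in the progress report is parked in validateController's 5-second wait loop, and the go-template it runs simply checks that the container named "update-demo" has a non-nil state.running. The following is a rough, self-contained sketch of that same check done with client-go instead of the framework's kubectl shelling-out; the namespace, label selector, and 300s timeout come from this log, everything else (helper names, kubeconfig path handling) is assumed for illustration:

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// allRunning mirrors what the polled go-template computes: every pod matching
// name=update-demo must have its "update-demo" container in a Running state.
func allRunning(ctx context.Context, c kubernetes.Interface, ns string) (bool, error) {
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: "name=update-demo"})
	if err != nil {
		return false, err
	}
	for _, p := range pods.Items {
		running := false
		for _, cs := range p.Status.ContainerStatuses {
			if cs.Name == "update-demo" && cs.State.Running != nil {
				running = true
			}
		}
		if !running {
			fmt.Printf("%s is created but not running\n", p.Name)
			return false, nil
		}
	}
	return true, nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	ctx := context.Background()
	const podStartTimeout = 5 * time.Minute // framework.PodStartTimeout in this run
	// Same loop shape as the excerpt at kubectl.go:2312: poll every 5s until timeout.
	for start := time.Now(); time.Since(start) < podStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
		if ok, err := allRunning(ctx, client, "kubectl-4665"); err == nil && ok {
			return
		}
	}
	panic("timed out waiting for name=update-demo pods to reach valid state")
}

In this run the check could never succeed: both pods sat in a create/backoff loop (see the runc sandbox events above), so the loop ran the full 300 seconds and the test failed.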
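The event dump is the diagnostic payload here (repeated FailedCreatePodSandBox / OCI runtime start failures from runc). The framework collects it automatically on failure, but the same events can be pulled directly; a minimal sketch, reusing the namespace and pod name from this run (the field selector and output format are illustrative assumptions, not framework code):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	// List only the events whose involved object is the failing pod.
	evs, err := client.CoreV1().Events("kubectl-4665").List(context.Background(), metav1.ListOptions{
		FieldSelector: "involvedObject.name=update-demo-nautilus-cpskq",
	})
	if err != nil {
		panic(err)
	}
	for _, e := range evs.Items {
		fmt.Printf("%s %s: %s\n", e.LastTimestamp, e.Reason, e.Message)
	}
}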
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sDNS\sshould\sprovide\s\/etc\/hosts\sentries\sfor\sthe\scluster\s\[Conformance\]$'
[FAILED] timed out waiting for the condition
In [It] at: test/e2e/network/dns_common.go:459 @ 03/14/23 12:27:15.468
(from junit_01.xml)
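"timed out waiting for the condition" is the stock error string from the k8s.io/apimachinery wait helpers that the DNS result checks poll with. A minimal sketch of that pattern (the interval, timeout, and condition body below are placeholders, not the values dns_common.go actually uses):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Returning (false, nil) from the condition keeps polling; when the
	// timeout lapses, wait returns exactly the error seen in the log:
	// "timed out waiting for the condition".
	err := wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
		resultsReady := false // stand-in for reading the /results files written by the probe pod
		return resultsReady, nil
	})
	if err != nil {
		fmt.Println(err)
	}
}

The detailed log below shows why the condition never turned true: every attempt to read the probe results failed at the API server with "the server is currently unable to handle the request".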
> Enter [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/14/23 12:16:38.314
STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 12:16:38.314
Mar 14 12:16:38.314: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename dns - test/e2e/framework/framework.go:250 @ 03/14/23 12:16:38.315
STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 12:16:38.33
STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 12:16:38.336
< Exit [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/14/23 12:16:38.341 (27ms)
> Enter [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 12:16:38.341
< Exit [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 12:16:38.341 (0s)
> Enter [It] should provide /etc/hosts entries for the cluster [Conformance] - test/e2e/network/dns.go:117 @ 03/14/23 12:16:38.341
STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-196.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;sleep 1; done - test/e2e/network/dns.go:123 @ 03/14/23 12:16:38.341
STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-196.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;sleep 1; done - test/e2e/network/dns.go:124 @ 03/14/23 12:16:38.341
STEP: creating a pod to probe /etc/hosts - test/e2e/network/dns.go:127 @ 03/14/23 12:16:38.341
STEP: submitting the pod to kubernetes - test/e2e/network/dns_common.go:496 @ 03/14/23 12:16:38.341
STEP: retrieving the pod - test/e2e/network/dns_common.go:508 @ 03/14/23 12:16:42.367
STEP: looking for the results for each expected name from probers - test/e2e/network/dns_common.go:514 @ 03/14/23 12:16:42.37
Mar 14 12:16:45.424: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:16:45.428: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:16:45.431: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:16:45.434: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:16:45.434: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
[... the same four lookups keep failing with "the server is currently unable to handle the request" on every retry from 12:16:53.492 through 12:21:15 ...]
Mar 14 12:21:15.440: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:21:15.444: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get
pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:21:15.448: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:21:15.453: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:21:15.453: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1] Mar 14 12:21:23.508: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:21:26.576: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:21:26.580: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:21:26.584: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:21:26.584: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1] Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 5m0.028s) test/e2e/network/dns.go:117 In [It] (Node Runtime: 5m0.001s) test/e2e/network/dns.go:117 At [By Step] looking for the results for each expected name from probers (Step Runtime: 4m55.972s) test/e2e/network/dns_common.go:514 Spec Goroutine goroutine 9360 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc00481a180, 0xc000899700) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000ce15f0, 0xc000899700, {0x0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0038bc3c0?}, 0xc000899700?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0038bc3c0, 0xc000899700) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x64fc620?, 0xc001926a50?) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc00062e6a0, 0xc000382100) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc000382100, {0x71cb1c0, 0xc00062e6a0}, {0x8?, 0x695a000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc00406a5a0, 0xc000382100, {0x0?, 0x100500000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc00406a5a0, 0xc000382100) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc003a49560, {0x71f6f30, 0xc001926870}, 0xc000f563b0?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc003a49560, {0x71f6f30, 0xc001926870}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f164c6bba20?, 0xc000f08f40?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f164c6bba20?, 0xc000f08f40?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:149 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc000f08f40}, 0xc00402dde8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:197 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc000f08f40}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc000f08f40}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f164c6bba20?, 0xc000f08f40?}, {0xc000f09040?, 0x7f164c6bba20?, 0xc000f08f40?}, {0x6a2e4f7?, 0x2d?}, 0xc00375a620?, {0x7233598, 0xc00197eb60}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.assertFilesExist(...) test/e2e/network/dns_common.go:453 | | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) { > assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "") | } | > k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f164c6bba20, 0xc000f08f40}, 0xc000cfb3b0, 0xc000fced80, {0xc000f09040, 0x4, 0x4}) test/e2e/network/dns_common.go:515 | // Try to find results for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet) | | // TODO: probe from the host, too. 
> k8s.io/kubernetes/test/e2e/network.glob..func2.4({0x7f164c6bba20, 0xc000f08f40}) test/e2e/network/dns.go:129 | ginkgo.By("creating a pod to probe /etc/hosts") | pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) > validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc000f08f40}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 5m20.03s) test/e2e/network/dns.go:117 In [It] (Node Runtime: 5m20.003s) test/e2e/network/dns.go:117 At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m15.974s) test/e2e/network/dns_common.go:514 Spec Goroutine goroutine 9360 [select, 2 minutes] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc00481a180, 0xc000899700) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000ce15f0, 0xc000899700, {0x0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0038bc3c0?}, 0xc000899700?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0038bc3c0, 0xc000899700) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x64fc620?, 0xc001926a50?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc00062e6a0, 0xc000382100) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc000382100, {0x71cb1c0, 0xc00062e6a0}, {0x8?, 0x695a000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc00406a5a0, 0xc000382100, {0x0?, 0x100500000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc00406a5a0, 0xc000382100) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc003a49560, {0x71f6f30, 0xc001926870}, 0xc000f563b0?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc003a49560, {0x71f6f30, 0xc001926870}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f164c6bba20?, 0xc000f08f40?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). 
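Every one of these progress dumps is parked in the same frames: assertFilesContain wraps its per-file reads in wait.PollImmediateWithContext with a 5-second interval and a 600-second budget, which is why the spec keeps retrying for ten minutes before the step fails. A minimal Go sketch of that retry pattern, with a hypothetical readFile callback standing in for the framework's pod-proxy request:

```go
package sketch

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForFiles retries until every expected result file can be read,
// mirroring the poll visible in the trace: an immediate first attempt,
// 5s between attempts, 600s overall. readFile is a stand-in for the
// e2e framework's GET through the API server's pod proxy.
func waitForFiles(ctx context.Context, fileNames []string,
	readFile func(context.Context, string) error) error {
	var failed []string
	return wait.PollImmediateWithContext(ctx, 5*time.Second, 600*time.Second,
		func(ctx context.Context) (bool, error) {
			failed = failed[:0]
			for _, name := range fileNames {
				if err := readFile(ctx, name); err != nil {
					// Source of the "Unable to read X from pod Y" lines above.
					fmt.Printf("Unable to read %s: %v\n", name, err)
					failed = append(failed, name)
				}
			}
			// Succeed only once no lookup result file is missing or unreadable.
			return len(failed) == 0, nil
		})
}
```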
Mar 14 12:22:00.441: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:03.504: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:06.576: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:09.652: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:09.652: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 14 12:22:13.488: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:13.493: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:13.497: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:13.502: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:13.502: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
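The failing call itself is one frame below the poll: each "Unable to read ... (get pods ...)" line is a GET through the API server's pods/proxy subresource, built with the Name(pod.Name).Suffix(fileDir, fileName) chain shown in the dns_common.go:472 snippet. A hedged client-go sketch of that request (function and parameter names here are illustrative, not the framework's own):

```go
package sketch

import (
	"context"

	clientset "k8s.io/client-go/kubernetes"
)

// readResultFile fetches one prober output file from the test pod by
// proxying through the API server -- the request path the trace shows
// ending in rest.(*Request).Do. When the apiserver answers 503, Raw()
// returns the "currently unable to handle the request" error that
// repeats throughout this log.
func readResultFile(ctx context.Context, c clientset.Interface,
	ns, podName, fileDir, fileName string) ([]byte, error) {
	return c.CoreV1().RESTClient().Get().
		Namespace(ns).
		Resource("pods").
		SubResource("proxy").
		Name(podName).
		Suffix(fileDir, fileName).
		Do(ctx).Raw()
}
```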
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 5m40.032s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 5m40.005s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m35.975s)
        test/e2e/network/dns_common.go:514
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 6m0.033s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 6m0.006s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 5m55.977s)
        test/e2e/network/dns_common.go:514
Mar 14 12:22:45.440: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:45.444: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:45.448: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:45.452: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:45.452: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 14 12:22:53.488: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:53.492: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:53.496: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:53.500: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:22:53.500: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 6m20.035s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 6m20.008s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m15.979s)
        test/e2e/network/dns_common.go:514
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 6m40.037s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 6m40.01s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m35.981s)
        test/e2e/network/dns_common.go:514
Mar 14 12:23:25.440: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:23:28.496: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:23:28.500: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:23:28.505: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:23:28.505: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 7m0.039s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 7m0.012s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m55.983s)
        test/e2e/network/dns_common.go:514
Mar 14 12:23:48.912: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:23:53.008: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 7m20.04s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 7m20.013s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m15.984s)
        test/e2e/network/dns_common.go:514
Mar 14 12:23:59.120: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:23:59.124: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:23:59.124: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 7m40.042s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 7m40.015s)
      test/e2e/network/dns.go:117
      At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m35.986s)
        test/e2e/network/dns_common.go:514
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc00062e6a0, 0xc003516700) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc003516700, {0x71cb1c0, 0xc00062e6a0}, {0x8?, 0x695a000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc00406a5a0, 0xc003516700, {0x100?, 0xc000564000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc00406a5a0, 0xc003516700) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc002d06360, {0x71f6f30, 0xc001926600}, 0xc000f563b0?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc002d06360, {0x71f6f30, 0xc001926600}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f164c6bba20?, 0xc000f08f40?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f164c6bba20?, 0xc000f08f40?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:149 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc000f08f40}, 0xc00402dde8, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:197 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc000f08f40}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc000f08f40}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f164c6bba20?, 0xc000f08f40?}, {0xc000f09040?, 0x7f164c6bba20?, 0xc000f08f40?}, {0x6a2e4f7?, 0x2d?}, 0xc00375a620?, {0x7233598, 0xc00197eb60}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.assertFilesExist(...) test/e2e/network/dns_common.go:453 | | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) { > assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "") | } | > k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f164c6bba20, 0xc000f08f40}, 0xc000cfb3b0, 0xc000fced80, {0xc000f09040, 0x4, 0x4}) test/e2e/network/dns_common.go:515 | // Try to find results for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet) | | // TODO: probe from the host, too. 
> k8s.io/kubernetes/test/e2e/network.glob..func2.4({0x7f164c6bba20, 0xc000f08f40}) test/e2e/network/dns.go:129 | ginkgo.By("creating a pod to probe /etc/hosts") | pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) > validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc000f08f40}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 12:24:30.439: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:24:33.520: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:24:33.525: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:24:33.530: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56) Mar 14 12:24:33.530: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1] Automatically polling progress: [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 8m0.044s) test/e2e/network/dns.go:117 In [It] (Node Runtime: 8m0.017s) test/e2e/network/dns.go:117 At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m55.988s) test/e2e/network/dns_common.go:514 Spec Goroutine goroutine 9360 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc00481a180, 0xc003aba200) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000ce15f0, 0xc003aba200, {0x0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0038bc3c0?}, 0xc003aba200?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0038bc3c0, 0xc003aba200) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x64fc620?, 0xc00268e7e0?) 
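The excerpt repeated in every one of these traces shows the retry budget that ultimately decides this test: assertFilesContain wraps each probe-file read in wait.PollImmediateWithContext with a 5-second interval and a 600-second timeout, so each transient error above just means "try again in five seconds". A minimal, self-contained sketch of that pattern; only the wait call mirrors the excerpt, the condition body is a hypothetical stand-in:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
    	// Same shape as the excerpt: try immediately, then every 5s, for up to 600s.
    	err := wait.PollImmediateWithContext(context.Background(),
    		5*time.Second, 600*time.Second,
    		func(ctx context.Context) (bool, error) {
    			// Hypothetical stand-in for the proxy read of one results file.
    			// Returning (false, nil) means "not there yet, keep polling".
    			return false, nil
    		})
    	// After 600s of (false, nil) this prints the exact message seen later
    	// in this log: "timed out waiting for the condition".
    	fmt.Println(err)
    }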
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 8m20.047s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 8m20.02s)
      test/e2e/network/dns.go:117
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m15.991s)
      test/e2e/network/dns_common.go:514

  Spec Goroutine
    goroutine 9360 [select]
      (blocked in http2.(*ClientConn).RoundTrip; remaining frames identical to the 7m40s report above, apart from pointer values)
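The Name(pod.Name).Suffix(fileDir, fileName).Do(ctx).Raw() fragment in each excerpt is the tail of a client-go request against the pods/proxy subresource: the prober containers write lookup results into files, and the test fetches each file through the apiserver. A sketch of what such a read looks like, assuming the usual CoreV1 REST client chain in front of the visible fragment:

    import (
    	"context"

    	clientset "k8s.io/client-go/kubernetes"
    )

    // readProbeFile fetches one prober result file through the apiserver's
    // pods/proxy subresource; every "Unable to read ..." line in this log
    // is one of these GETs failing.
    func readProbeFile(ctx context.Context, c clientset.Interface, ns, podName, fileDir, fileName string) ([]byte, error) {
    	return c.CoreV1().RESTClient().Get().
    		Namespace(ns).
    		Resource("pods").
    		SubResource("proxy").
    		Name(podName).
    		Suffix(fileDir, fileName). // e.g. ("results", "wheezy_hosts@dns-querier-1")
    		Do(ctx).Raw()
    }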
Mar 14 12:25:05.440: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:05.444: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:05.448: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:05.451: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:05.451: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 14 12:25:13.520: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:16.592: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:16.597: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:16.601: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:16.601: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 8m40.049s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 8m40.022s)
      test/e2e/network/dns.go:117
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m35.993s)
      test/e2e/network/dns_common.go:514

  Spec Goroutine
    goroutine 9360 [select]
      (waiting between poll attempts in wait.waitForWithContext [wait.go:195]; remaining frames identical to the reports above)
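Every failed read reports "the server is currently unable to handle the request", which is the apiserver answering the proxied GET with 503 ServiceUnavailable because the target pod cannot serve it. The poll condition treats that as "retry" rather than "fail"; a sketch of that distinction using the standard apimachinery error helpers (the read function is the hypothetical one sketched earlier):

    import (
    	"context"

    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    )

    // proxyReadCondition converts the 503s seen throughout this log into
    // "keep polling" instead of aborting the wait outright.
    func proxyReadCondition(read func(context.Context) ([]byte, error)) func(context.Context) (bool, error) {
    	return func(ctx context.Context) (bool, error) {
    		if _, err := read(ctx); err != nil {
    			if apierrors.IsServiceUnavailable(err) {
    				// "the server is currently unable to handle the request"
    				return false, nil
    			}
    			return false, err // anything else fails the wait immediately
    		}
    		return true, nil
    	}
    }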
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 9m0.05s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 9m0.023s)
      test/e2e/network/dns.go:117
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 8m55.994s)
      test/e2e/network/dns_common.go:514

  Spec Goroutine
    goroutine 9360 [select]
      (blocked in http2.(*ClientConn).RoundTrip; remaining frames identical to the 7m40s report above, apart from pointer values)
Mar 14 12:25:50.442: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:53.520: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:56.591: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 9m20.052s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 9m20.025s)
      test/e2e/network/dns.go:117
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 9m15.995s)
      test/e2e/network/dns_common.go:514

  Spec Goroutine
    goroutine 9360 [select]
      (blocked in http2.(*ClientConn).RoundTrip; remaining frames identical to the 7m40s report above, apart from pointer values)
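The "Automatically polling progress" blocks recurring every twenty seconds are Ginkgo v2 progress reports: while a node runs past its progress threshold, Ginkgo periodically re-dumps the current step and the spec goroutine, which is why the same frames repeat with only pointer values changing. A sketch of opting a single spec into this behavior with the v2 decorator; the 20-second threshold here is chosen to match the cadence in this log, not taken from the suite's actual configuration:

    import (
    	"time"

    	"github.com/onsi/ginkgo/v2"
    )

    var _ = ginkgo.It("reports progress while it runs",
    	// Ask Ginkgo to dump progress (step, runtimes, spec goroutine)
    	// once the spec has run this long, then keep dumping periodically.
    	ginkgo.PollProgressAfter(20*time.Second),
    	func(ctx ginkgo.SpecContext) {
    		// long-running polling work here
    	},
    )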
Mar 14 12:25:59.664: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:25:59.664: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 14 12:26:03.508: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:03.512: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:03.515: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:03.518: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:03.518: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 9m40.053s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 9m40.026s)
      test/e2e/network/dns.go:117
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 9m35.997s)
      test/e2e/network/dns_common.go:514

  Spec Goroutine
    goroutine 9360 [select]
      (blocked in http2.(*ClientConn).RoundTrip; remaining frames identical to the 7m40s report above, apart from pointer values)

Mar 14 12:26:35.440: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:35.444: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:35.448: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:35.451: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:35.451: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]

Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 10m0.055s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 10m0.028s)
      test/e2e/network/dns.go:117
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 9m55.999s)
      test/e2e/network/dns_common.go:514

  Spec Goroutine
    goroutine 9360 [select]
      (waiting between poll attempts in wait.waitForWithContext [wait.go:195]; remaining frames identical to the reports above)

Mar 14 12:26:43.504: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:43.508: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:43.512: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:43.517: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:26:43.517: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Automatically polling progress:
  [sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] (Spec Runtime: 10m20.056s)
    test/e2e/network/dns.go:117
    In [It] (Node Runtime: 10m20.029s)
      test/e2e/network/dns.go:117
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 10m16s)
      test/e2e/network/dns_common.go:514

  Spec Goroutine
    goroutine 9360 [select]
      (blocked in http2.(*ClientConn).RoundTrip; remaining frames identical to the 7m40s report above, apart from pointer values)

Mar 14 12:27:15.440: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:27:15.444: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:27:15.448: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:27:15.452: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:27:15.452: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 14 12:27:15.456: INFO: Unable to read wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:27:15.460: INFO: Unable to read wheezy_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:27:15.464: INFO: Unable to read jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:27:15.467: INFO: Unable to read jessie_hosts@dns-querier-1 from pod dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: the server is currently unable to handle the request (get pods dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56)
Mar 14 12:27:15.467: INFO: Lookups using dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 failed for: [wheezy_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local wheezy_hosts@dns-querier-1 jessie_hosts@dns-querier-1.dns-test-service.dns-196.svc.cluster.local jessie_hosts@dns-querier-1]
Mar 14 12:27:15.467: INFO: Unexpected error:
    <*errors.errorString | 0xc00022bbb0>: {
        s: "timed out waiting for the condition",
    }
[FAILED] timed out waiting for the condition
In [It] at: test/e2e/network/dns_common.go:459 @ 03/14/23 12:27:15.468
< Exit [It] should provide /etc/hosts entries for the cluster [Conformance] - test/e2e/network/dns.go:117 @ 03/14/23 12:27:15.468 (10m37.127s)
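The terminal error above is the wait package's generic timeout: after 600 seconds of "not yet", the poller returns its sentinel error, and framework.ExpectNoError turns it into the [FAILED] verdict. A short sketch of recognizing that sentinel in calling code; the handling itself is illustrative:

    import (
    	"errors"
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    func reportTimeout(err error) {
    	// wait.ErrWaitTimeout carries exactly the message recorded above:
    	// "timed out waiting for the condition".
    	if errors.Is(err, wait.ErrWaitTimeout) {
    		fmt.Println("poll budget exhausted; the condition never became true")
    	}
    }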
> Enter [AfterEach] [sig-network] DNS - test/e2e/framework/node/init/init.go:33 @ 03/14/23 12:27:15.468
Mar 14 12:27:15.468: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
< Exit [AfterEach] [sig-network] DNS - test/e2e/framework/node/init/init.go:33 @ 03/14/23 12:27:15.471 (4ms)
> Enter [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns_common.go:498 @ 03/14/23 12:27:15.471
STEP: deleting the pod - test/e2e/network/dns_common.go:499 @ 03/14/23 12:27:15.471
< Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns_common.go:498 @ 03/14/23 12:27:15.484 (13ms)
> Enter [DeferCleanup (Each)] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 12:27:15.484
< Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 12:27:15.484 (0s)
> Enter [DeferCleanup (Each)] [sig-network] DNS - dump namespaces | framework.go:209 @ 03/14/23 12:27:15.484
STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 12:27:15.484
STEP: Collecting events from namespace "dns-196". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 12:27:15.484
STEP: Found 14 events. - test/e2e/framework/debug/dump.go:46 @ 03/14/23 12:27:15.488
Mar 14 12:27:15.488: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: { } Scheduled: Successfully assigned dns-196/dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56 to 172.17.0.1
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:40 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:40 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} Created: Created container webserver
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:40 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} Started: Started container webserver
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:40 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:40 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} Created: Created container querier
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:40 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} Started: Started container querier
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:40 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7" already present on machine
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:40 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} Created: Created container jessie-querier
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:40 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} Started: Started container jessie-querier
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:41 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:47 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container webserver in pod dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56_dns-196(7b6990e6-dafc-4370-9939-1a7138301064)
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:47 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container querier in pod dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56_dns-196(7b6990e6-dafc-4370-9939-1a7138301064)
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:47 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container jessie-querier in pod dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56_dns-196(7b6990e6-dafc-4370-9939-1a7138301064)
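The 14 events above are gathered by the framework's failure dump. The same view can be reproduced ad hoc with client-go when triaging a run like this one; the namespace name is taken from this log, everything else is a generic sketch:

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // dumpEvents prints namespace events much like the dump above,
    // e.g. dumpEvents(ctx, c, "dns-196").
    func dumpEvents(ctx context.Context, c kubernetes.Interface, ns string) error {
    	events, err := c.CoreV1().Events(ns).List(ctx, metav1.ListOptions{})
    	if err != nil {
    		return err
    	}
    	for _, e := range events.Items {
    		fmt.Printf("%s %s %s/%s: %s\n",
    			e.LastTimestamp, e.Reason, e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Message)
    	}
    	return nil
    }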
Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:47 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container webserver in pod dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56_dns-196(7b6990e6-dafc-4370-9939-1a7138301064) Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:47 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container querier in pod dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56_dns-196(7b6990e6-dafc-4370-9939-1a7138301064) Mar 14 12:27:15.488: INFO: At 2023-03-14 12:16:47 +0000 UTC - event for dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container jessie-querier in pod dns-test-a527677d-e3f4-4ab0-8edc-3a1753e95d56_dns-196(7b6990e6-dafc-4370-9939-1a7138301064) Mar 14 12:27:15.492: INFO: POD NODE PHASE GRACE CONDITIONS Mar 14 12:27:15.492: INFO: Mar 14 12:27:15.495: INFO: Logging node info for node 172.17.0.1 Mar 14 12:27:15.498: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 12446 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 12:26:54 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 12:26:54 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 12:26:54 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk 
pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 12:26:54 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 12:26:54 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 14 12:27:15.499: INFO: Logging kubelet events for node 172.17.0.1 Mar 14 12:27:15.503: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 14 12:27:15.521: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded) Mar 14 12:27:15.521: INFO: Container coredns ready: false, restart count 20 Mar 14 12:27:15.556: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 12:27:15.556 (71ms) < Exit [DeferCleanup (Each)] [sig-network] DNS - dump namespaces | framework.go:209 @ 03/14/23 12:27:15.556 (71ms) > Enter [DeferCleanup (Each)] [sig-network] DNS - tear down 
framework | framework.go:206 @ 03/14/23 12:27:15.556 STEP: Destroying namespace "dns-196" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 12:27:15.556 < Exit [DeferCleanup (Each)] [sig-network] DNS - tear down framework | framework.go:206 @ 03/14/23 12:27:15.564 (9ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 12:27:15.564 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 12:27:15.564 (0s)
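Both DNS conformance failures on this page have the same shape: the test writes probe results into /results inside the pod, then polls for those files every 5s for up to 600s via wait.PollImmediateWithContext (the exact call is visible in the dns_common.go excerpt later in this log). A minimal, self-contained sketch of that polling pattern, assuming only the k8s.io/apimachinery wait package — checkResults here is a hypothetical stand-in, not the test's real file check:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// checkResults is a hypothetical stand-in for the test's real check that
// every expected file exists under /results in the prober pod.
func checkResults(ctx context.Context) (bool, error) {
	return false, nil
}

func main() {
	ctx := context.Background()
	// Same interval/timeout pair the DNS test passes in dns_common.go:
	// poll every 5s, give up after 600s.
	err := wait.PollImmediateWithContext(ctx, 5*time.Second, 600*time.Second,
		func(ctx context.Context) (bool, error) {
			ok, err := checkResults(ctx)
			if err != nil {
				// Transient read errors (like the apiserver proxy 503s in
				// this log) are treated as "not yet", so polling continues.
				return false, nil
			}
			return ok, nil
		})
	if err != nil {
		// On timeout this surfaces as the generic
		// "timed out waiting for the condition" seen in both failures.
		fmt.Println(err)
	}
}

Because the condition function swallows per-read errors, a pod whose containers never come up (as the coredns restart count and BackOff events above suggest) burns the full ten minutes before the test fails.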
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sDNS\sshould\sprovide\sDNS\sfor\spods\sfor\sSubdomain\s\[Conformance\]$'
[FAILED] timed out waiting for the condition In [It] at: test/e2e/network/dns_common.go:459 @ 03/14/23 11:30:45.25 (from junit_01.xml)
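Every "Unable to read ... the server is currently unable to handle the request (get pods ...)" line below is the apiserver's pods/proxy subresource returning an error while the prober containers crash-loop, so the test never sees the OK files. A hedged sketch of what one such per-file read looks like in client-go — the function name is illustrative, not the exact helper from dns_common.go, though the "results" directory matches the fileDir the test passes to assertFilesExist:

package dnsprobe

import (
	"context"

	"k8s.io/client-go/kubernetes"
)

// readResultViaProxy is an illustrative sketch of fetching one probe-result
// file from the pod's webserver container through the apiserver's
// pods/proxy subresource. While the pod is crash-looping, this is the
// request that fails with "the server is currently unable to handle the
// request (get pods <name>)".
func readResultViaProxy(ctx context.Context, c kubernetes.Interface, ns, pod, file string) ([]byte, error) {
	return c.CoreV1().RESTClient().Get().
		Namespace(ns).
		Resource("pods").
		SubResource("proxy").
		Name(pod).
		Suffix("results", file). // the test's fileDir is "results"
		Do(ctx).
		Raw()
}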
> Enter [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/14/23 11:19:41.946 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:19:41.946 Mar 14 11:19:41.946: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename dns - test/e2e/framework/framework.go:250 @ 03/14/23 11:19:41.947 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:19:41.957 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:19:41.962 < Exit [BeforeEach] [sig-network] DNS - set up framework | framework.go:191 @ 03/14/23 11:19:41.967 (21ms) > Enter [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:19:41.967 < Exit [BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:19:41.967 (0s) > Enter [It] should provide DNS for pods for Subdomain [Conformance] - test/e2e/network/dns.go:286 @ 03/14/23 11:19:41.967 STEP: Creating a test headless service - test/e2e/network/dns.go:288 @ 03/14/23 11:19:41.967 STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-3251.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-3251.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local;sleep 1; done - test/e2e/network/dns.go:310 @ 03/14/23 11:19:41.971 STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-3251.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-3251.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local;sleep 1; done - test/e2e/network/dns.go:311 @ 03/14/23 11:19:41.971 STEP: creating a pod to probe DNS - test/e2e/network/dns.go:314 @ 03/14/23 11:19:41.971 STEP: submitting the pod to kubernetes - test/e2e/network/dns_common.go:496 @ 03/14/23 11:19:41.971 STEP: retrieving the pod - test/e2e/network/dns_common.go:508 @ 03/14/23 11:19:54.014 STEP: looking for the results for each expected name from probers - test/e2e/network/dns_common.go:514 @ 03/14/23 11:19:54.017 Mar 14 11:20:24.022: INFO: Unable to read 
wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:27.088: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:27.093: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:27.096: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:27.100: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:27.104: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:27.107: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:27.111: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:27.111: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:20:42.416: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:42.420: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:42.424: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod 
dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:42.428: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:42.432: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:42.435: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:42.439: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:42.442: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:42.442: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:20:47.117: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:47.121: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:47.124: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:47.129: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:47.133: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods 
dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:47.136: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:47.140: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:47.146: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:47.146: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:20:55.184: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:55.187: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:55.191: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:55.195: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:55.199: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:55.203: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:55.206: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:55.210: INFO: Unable to read 
jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:20:55.210: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:21:27.117: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:27.124: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:27.129: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:27.134: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:27.138: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:27.143: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:27.148: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:27.153: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:27.153: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local 
jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:21:32.116: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:32.119: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:32.123: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:32.126: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:32.129: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:32.133: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:32.136: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:32.139: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:32.139: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:21:40.176: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:43.252: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable 
to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:43.257: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:43.262: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:43.266: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:43.269: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:21:43.274: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:13.279: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:13.279: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:22:17.116: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:17.121: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:17.125: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:17.130: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:17.134: INFO: Unable to read 
jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:17.138: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:17.142: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:17.146: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:17.146: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:22:25.169: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:25.175: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:25.179: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:25.183: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:25.186: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:25.189: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:25.193: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod 
dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:25.196: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:25.196: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:22:57.117: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:57.121: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:57.125: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:57.128: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:57.132: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:57.135: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:57.139: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:57.144: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:22:57.144: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local 
wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:23:05.168: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:05.172: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:05.176: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:05.179: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:05.182: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:05.185: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:05.188: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:05.191: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:05.191: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:23:37.116: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods 
dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:37.120: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:37.124: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:37.127: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:37.132: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:37.136: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:37.141: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:37.145: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:37.145: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:23:45.168: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:45.172: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:45.176: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:45.180: INFO: Unable to read 
wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:45.183: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:45.186: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:45.189: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:45.192: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:45.192: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:23:53.232: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:53.236: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:53.239: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:53.242: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:53.246: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:23:53.249: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod 
Mar 14 11:23:53 to 11:25:17: the prober pod repeats an identical failure cycle on every poll. Each cycle logs, for each of the eight expected names, an entry of the form

    INFO: Unable to read <name> from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058)

followed by the per-cycle summary

    INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local]

All eight names fail in every cycle. Cycles in this span complete at 11:23:53.254, 11:23:57.144, 11:24:05.193, 11:24:37.143, 11:24:45.195, and 11:25:17.145, interleaved with the automatic progress reports below.

Automatically polling progress: [sig-network] DNS should provide DNS for pods for Subdomain [Conformance]
    Spec Runtime: 5m0.022s / Node Runtime: 5m0.001s    test/e2e/network/dns.go:286
    At [By Step] looking for the results for each expected name from probers (Step Runtime: 4m47.951s)    test/e2e/network/dns_common.go:514
Spec Goroutine: goroutine 1740 [select], blocked in the poll wait:
    k8s.io/apimachinery/pkg/util/wait.waitForWithContext    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:195
    k8s.io/apimachinery/pkg/util/wait.poll    vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182
    k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext    vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116
  > k8s.io/kubernetes/test/e2e/network.assertFilesContain    test/e2e/network/dns_common.go:459
        | framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) {
        |     failed = []string{}
  > k8s.io/kubernetes/test/e2e/network.assertFilesExist    test/e2e/network/dns_common.go:453
        | assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "")
  > k8s.io/kubernetes/test/e2e/network.validateDNSResults    test/e2e/network/dns_common.go:515
        | ginkgo.By("looking for the results for each expected name from probers")
        | assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet)
  > k8s.io/kubernetes/test/e2e/network.glob..func2.8    test/e2e/network/dns.go:320
        | validateDNSResults(ctx, f, pod1, append(wheezyFileNames, jessieFileNames...))
    github.com/onsi/ginkgo/v2 internals: node.go:452, suite.go:854, suite.go:841

The 5m20.024s progress report (same spec, same [By Step]) catches goroutine 1740 inside the HTTP request itself rather than the poll wait:
    golang.org/x/net/http2: (*ClientConn).RoundTrip, (*Transport).RoundTripOpt, (*Transport).RoundTrip, noDialH2RoundTripper.RoundTrip    vendor/golang.org/x/net/http2/transport.go:1269, 561, 513, 3085
    net/http: (*Transport).roundTrip, (*Transport).RoundTrip, send, (*Client).send, (*Client).do, (*Client).Do
    k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip    vendor/k8s.io/client-go/transport/round_trippers.go:168
    k8s.io/client-go/rest.(*Request).request, (*Request).Do    vendor/k8s.io/client-go/rest/request.go:999, 1039
  > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1    test/e2e/network/dns_common.go:472
        | Name(pod.Name).
        | Suffix(fileDir, fileName).
        | Do(ctx).Raw()
    then the same wait/poll and test frames down to test/e2e/network/dns.go:320 as above.
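The two stack shapes above also explain the overall timing: assertFilesContain retries every name on a 5-second interval under a 600-second deadline, so the spec keeps polling long past the first failure. What follows is a minimal, self-contained sketch of that retry pattern, not the e2e framework's actual code: readResult is a hypothetical stand-in for the pod-proxy GET, and only the wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, ...) shape and the failed-names bookkeeping are taken from the excerpt at dns_common.go:459.

    package main

    import (
        "context"
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // readResult is a hypothetical stand-in for the client-go pod-proxy GET
    // the real test performs; here it always fails the way this log does.
    func readResult(ctx context.Context, fileName string) ([]byte, error) {
        return nil, fmt.Errorf("the server is currently unable to handle the request")
    }

    // assertFilesExist mirrors the retry structure in the goroutine dump:
    // poll every 5s, give up after 600s, re-check every name each cycle.
    func assertFilesExist(ctx context.Context, fileNames []string) error {
        var failed []string
        return wait.PollImmediateWithContext(ctx, 5*time.Second, 600*time.Second,
            func(ctx context.Context) (bool, error) {
                failed = failed[:0]
                for _, name := range fileNames {
                    if _, err := readResult(ctx, name); err != nil {
                        // one "Unable to read <name> from pod ..." line per name
                        failed = append(failed, name)
                    }
                }
                if len(failed) > 0 {
                    // one "Lookups ... failed for: [...]" summary per cycle
                    fmt.Printf("lookups failed for: %v\n", failed)
                    return false, nil // not done; poll again in 5s
                }
                return true, nil
            })
    }

    func main() {
        names := []string{"wheezy_udp@example", "jessie_tcp@example"}
        if err := assertFilesExist(context.Background(), names); err != nil {
            fmt.Println("poll timed out; the spec would fail here:", err)
        }
    }

Run as-is, this prints a failure summary every 5 seconds for 10 minutes and then returns a timeout error, matching the cadence of the cycles logged above.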
Mar 14 11:25:32 to 11:26:05: the same eight-name cycle keeps failing. Cycle summaries land at 11:25:41.693, 11:25:42.138, 11:25:57.327, and 11:26:05.195, each listing the full wheezy_*/jessie_* name set above. In the 11:25:32 cycle the first four reads each take about 3s (11:25:32.464, 11:25:35.540, 11:25:38.608, 11:25:41.680), so the API requests themselves are slow, not merely erroring.

Ginkgo keeps emitting the automatic progress report every 20s (Spec Runtime 5m40.026s, 6m0.028s, 6m20.029s, 6m40.03s), always at the same step ("looking for the results for each expected name from probers", test/e2e/network/dns_common.go:514). Goroutine 1740's stack alternates between the two shapes already shown: blocked in the wait.PollImmediateWithContext select (the 5m40s, 6m0s, and 6m20s reports) and blocked inside the client-go HTTP/2 round trip issued from assertFilesContain.func1's Do(ctx).Raw() at dns_common.go:472 (the 6m40s report). Only the request and pointer values differ between dumps.
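For reference, the request goroutine 1740 is blocked in is a pod-proxy GET built with client-go's REST request builder. A hedged sketch follows, assuming a configured clientset: only the Name(pod.Name).Suffix(fileDir, fileName).Do(ctx).Raw() chain is taken from the excerpt at dns_common.go:472; the Namespace/Resource/SubResource parts and the function name are my reconstruction.

    package sketch

    import (
        "context"

        "k8s.io/client-go/kubernetes"
    )

    // readResultViaProxy fetches <fileDir>/<fileName> from the prober pod
    // through the API server's pod proxy subresource, the same round trip
    // the blocked goroutine is sitting in (http2 RoundTrip -> rest.Request.Do).
    func readResultViaProxy(ctx context.Context, client kubernetes.Interface,
        namespace, podName, fileDir, fileName string) ([]byte, error) {
        return client.CoreV1().RESTClient().Get().
            Namespace(namespace).
            Resource("pods").
            SubResource("proxy").
            Name(podName).
            Suffix(fileDir, fileName). // e.g. "results", "wheezy_udp@..."
            Do(ctx).Raw()
    }

Because that request travels API server to kubelet to pod, the repeated "the server is currently unable to handle the request (get pods ...)" errors suggest the test cannot even fetch the probe results, rather than reporting anything about DNS resolution inside the pod.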
> k8s.io/kubernetes/test/e2e/network.glob..func2.8({0x7f164c6bba20, 0xc003b9bcc0}) test/e2e/network/dns.go:320 | pod1.Spec.Subdomain = serviceName | > validateDNSResults(ctx, f, pod1, append(wheezyFileNames, jessieFileNames...)) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc003b9bcc0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:26:37.118: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:40.176: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Automatically polling progress: [sig-network] DNS should provide DNS for pods for Subdomain [Conformance] (Spec Runtime: 7m0.032s) test/e2e/network/dns.go:286 In [It] (Node Runtime: 7m0.011s) test/e2e/network/dns.go:286 At [By Step] looking for the results for each expected name from probers (Step Runtime: 6m47.961s) test/e2e/network/dns_common.go:514 Spec Goroutine goroutine 1740 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc0003d7c80, 0xc0008c0800) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000ce15f0, 0xc0008c0800, {0x0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0038bc3c0?}, 0xc0008c0800?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0038bc3c0, 0xc0008c0800) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x64fc620?, 0xc003d43e60?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0040741e0, 0xc0008c0700) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc0008c0700, {0x71cb1c0, 0xc0040741e0}, {0x8?, 0x695a000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc003d42150, 0xc0008c0700, {0x100?, 0xc00160c000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc003d42150, 0xc0008c0700) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc003eb59e0, {0x71f6f30, 0xc003ed0e10}, 0xc0006ba338?) 
vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc003eb59e0, {0x71f6f30, 0xc003ed0e10}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f164c6bba20?, 0xc003b9bcc0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f164c6bba20?, 0xc003b9bcc0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:149 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc003b9bcc0}, 0xc003407e30, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:197 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc003b9bcc0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc003b9bcc0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f164c6bba20?, 0xc003b9bcc0?}, {0xc003843700?, 0x7f164c6bba20?, 0xc003b9bcc0?}, {0x6a2e4f7?, 0x2d?}, 0xc003bb7a00?, {0x7233598, 0xc0020644e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.assertFilesExist(...) test/e2e/network/dns_common.go:453 | | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) { > assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "") | } | > k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f164c6bba20, 0xc003b9bcc0}, 0xc000cfb3b0, 0xc003ba5b00, {0xc003843700, 0x8, 0x8}) test/e2e/network/dns_common.go:515 | // Try to find results for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet) | | // TODO: probe from the host, too. 
> k8s.io/kubernetes/test/e2e/network.glob..func2.8({0x7f164c6bba20, 0xc003b9bcc0}) test/e2e/network/dns.go:320 | pod1.Spec.Subdomain = serviceName | > validateDNSResults(ctx, f, pod1, append(wheezyFileNames, jessieFileNames...)) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc003b9bcc0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:26:43.252: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:43.258: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:43.264: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:43.269: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:43.273: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:43.277: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:43.277: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:26:47.117: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:47.121: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 
11:26:47.126: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:47.130: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:47.134: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:47.137: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:47.140: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:47.144: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:47.144: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:26:55.184: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:55.188: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:55.191: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:55.194: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:26:55.197: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod 
dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058)
Mar 14 11:26:55.200: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058)
Mar 14 11:26:55.202: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058)
Mar 14 11:26:55.205: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058)
Mar 14 11:26:55.205: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local]

Automatically polling progress: [sig-network] DNS should provide DNS for pods for Subdomain [Conformance] (Spec Runtime: 7m20.033s)
  test/e2e/network/dns.go:286
  In [It] (Node Runtime: 7m20.012s)
    test/e2e/network/dns.go:286
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 7m7.962s)
    test/e2e/network/dns_common.go:514

Spec Goroutine
goroutine 1740 [select]
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc0003d7c80, 0xc003c23e00)
    vendor/golang.org/x/net/http2/transport.go:1269
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000ce15f0, 0xc003c23e00, {0x0?})
    vendor/golang.org/x/net/http2/transport.go:561
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...)
    vendor/golang.org/x/net/http2/transport.go:513
k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0038bc3c0?}, 0xc003c23e00?)
    vendor/golang.org/x/net/http2/transport.go:3085
net/http.(*Transport).roundTrip(0xc0038bc3c0, 0xc003c23e00)
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/transport.go:548
net/http.(*Transport).RoundTrip(0x64fc620?, 0xc0039deff0?)
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/roundtrip.go:17
k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0040741e0, 0xc003c23d00)
    vendor/k8s.io/client-go/transport/round_trippers.go:168
net/http.send(0xc003c23d00, {0x71cb1c0, 0xc0040741e0}, {0x8?, 0x695a000?, 0x0?})
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:252
net/http.(*Client).send(0xc003d42150, 0xc003c23d00, {0x100?, 0xc000500000?, 0x0?})
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:176
net/http.(*Client).do(0xc003d42150, 0xc003c23d00)
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:716
net/http.(*Client).Do(...)
    /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:582
k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc003eb4b40, {0x71f6f30, 0xc0039debd0}, 0xc0006ba338?)
    vendor/k8s.io/client-go/rest/request.go:999
k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc003eb4b40, {0x71f6f30, 0xc0039debd0})
    vendor/k8s.io/client-go/rest/request.go:1039
> k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f164c6bba20?, 0xc003b9bcc0?})
    test/e2e/network/dns_common.go:472
    |         Name(pod.Name).
    |         Suffix(fileDir, fileName).
    >         Do(ctx).Raw()
    |
    |     if err != nil {
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f164c6bba20?, 0xc003b9bcc0?}, 0x23?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:149
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc003b9bcc0}, 0xc003407e30, 0x2b97e2a?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:197
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc003b9bcc0}, 0x0?, 0x0?, 0x2?)
    vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc003b9bcc0}, 0x0?, 0x0?, 0x0?)
    vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116
> k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f164c6bba20?, 0xc003b9bcc0?}, {0xc003843700?, 0x7f164c6bba20?, 0xc003b9bcc0?}, {0x6a2e4f7?, 0x2d?}, 0xc003bb7a00?, {0x7233598, 0xc0020644e0}, ...)
    test/e2e/network/dns_common.go:459
    |     var failed []string
    |
    >     framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) {
    |         failed = []string{}
    |
> k8s.io/kubernetes/test/e2e/network.assertFilesExist(...)
    test/e2e/network/dns_common.go:453
    |
    | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
    >     assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "")
    | }
    |
> k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f164c6bba20, 0xc003b9bcc0}, 0xc000cfb3b0, 0xc003ba5b00, {0xc003843700, 0x8, 0x8})
    test/e2e/network/dns_common.go:515
    |     // Try to find results for each expected name.
    |     ginkgo.By("looking for the results for each expected name from probers")
    >     assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet)
    |
    |     // TODO: probe from the host, too.
> k8s.io/kubernetes/test/e2e/network.glob..func2.8({0x7f164c6bba20, 0xc003b9bcc0})
    test/e2e/network/dns.go:320
    |     pod1.Spec.Subdomain = serviceName
    |
    >     validateDNSResults(ctx, f, pod1, append(wheezyFileNames, jessieFileNames...))
    | })
    |
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc003b9bcc0})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841

(The 7m40.035s progress poll reprinted the identical goroutine 1740 stack; only the request pointers differ.)
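Note: the loop the spec is stuck in is visible in the source excerpts above. assertFilesContain (test/e2e/network/dns_common.go:459) re-reads every expected results file every 5 seconds, for up to 600 seconds, and keeps a list of the names that still fail. A minimal sketch of that pattern, assuming only what the excerpts show (the readFile callback is a hypothetical stand-in for the pods/proxy read at dns_common.go:472):

    package dnsprobe

    import (
        "context"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // pollForResults mirrors the excerpt above: poll every 5s for up to
    // 600s, re-checking every expected results file on each tick and
    // remembering which names still fail.
    func pollForResults(ctx context.Context, fileNames []string,
        readFile func(ctx context.Context, fileName string) error) ([]string, error) {
        var failed []string
        err := wait.PollImmediateWithContext(ctx, 5*time.Second, 600*time.Second,
            func(ctx context.Context) (bool, error) {
                failed = failed[:0] // reset; only the latest round counts
                for _, name := range fileNames {
                    if err := readFile(ctx, name); err != nil {
                        failed = append(failed, name) // retried on the next tick
                    }
                }
                return len(failed) == 0, nil // true stops the poll
            })
        return failed, err
    }

Once the 600-second budget is exhausted, wait returns a timeout error and framework.ExpectNoError fails the spec, which is why the probing below keeps cycling until roughly the ten-minute mark of the step.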
(Each probe summary below was preceded by the same eight "Unable to read …: the server is currently unable to handle the request (get pods …)" errors, and the Ginkgo progress polls at Spec Runtime 8m0.037s, 8m20.039s, 8m40.041s, 9m0.043s and 9m20.045s each reprinted the goroutine 1740 stack shown above; only timestamps and request pointers change between repeats.)

Mar 14 11:27:27.145: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [the same eight wheezy/jessie names]
Mar 14 11:27:35.207: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [the same eight wheezy/jessie names]
Mar 14 11:28:07.146: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [the same eight wheezy/jessie names]
Mar 14 11:28:45.221: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [the same eight wheezy/jessie names]
Mar 14 11:29:17.148: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [the same eight wheezy/jessie names]
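Note: the call that keeps failing is the pods/proxy GET in the excerpt at dns_common.go:472 — the apiserver is asked to proxy an HTTP request into the prober pod and return the contents of results/<name>, and it answers "the server is currently unable to handle the request" (a 503) every time. A sketch of that request, assuming the request-builder chain before Name(...), which the excerpt does not show:

    package dnsprobe

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // readResultsFile asks the apiserver to proxy a GET into the prober
    // pod and return the body of <fileDir>/<fileName>. The Namespace/
    // Resource/SubResource calls are assumptions; the log excerpt shows
    // only Name(pod.Name).Suffix(fileDir, fileName).Do(ctx).Raw().
    func readResultsFile(ctx context.Context, client clientset.Interface,
        pod *v1.Pod, fileDir, fileName string) ([]byte, error) {
        return client.CoreV1().RESTClient().Get().
            Namespace(pod.Namespace).
            Resource("pods").
            SubResource("proxy").
            Name(pod.Name).
            Suffix(fileDir, fileName).
            Do(ctx).Raw()
    }

A persistent 503 on pods/proxy usually indicates the apiserver cannot reach the kubelet or the pod, rather than a problem with the DNS answers themselves.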
(At the 9m40.047s progress poll, goroutine 1740 was parked in wait.waitForWithContext between ticks; the 10m0.048s and 10m20.05s polls again showed it blocked in the HTTP/2 RoundTrip select, as above.)

Mar 14 11:29:25.192: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [the same eight wheezy/jessie names]
Mar 14 11:30:09.412: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [the same eight wheezy/jessie names]

Automatically polling progress: [sig-network] DNS should provide DNS for pods for Subdomain [Conformance] (Spec Runtime: 10m40.052s)
  test/e2e/network/dns.go:286
  In [It] (Node Runtime: 10m40.031s)
    test/e2e/network/dns.go:286
  At [By Step] looking for the results for each expected name from probers (Step Runtime: 10m27.981s)
    test/e2e/network/dns_common.go:514

Spec Goroutine
goroutine 1740 [select]
(the captured log ends here, mid-poll)
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc0003d7c80, 0xc003c23100) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000ce15f0, 0xc003c23100, {0x0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0038bc3c0?}, 0xc003c23100?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0038bc3c0, 0xc003c23100) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x64fc620?, 0xc003ed13b0?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0040741e0, 0xc003c23000) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc003c23000, {0x71cb1c0, 0xc0040741e0}, {0x8?, 0x695a000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc003d42150, 0xc003c23000, {0x100?, 0xc000500000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc003d42150, 0xc003c23000) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc003fa77a0, {0x71f6f30, 0xc003ed1230}, 0xc0006ba338?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc003fa77a0, {0x71f6f30, 0xc003ed1230}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f164c6bba20?, 0xc003b9bcc0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f164c6bba20?, 0xc003b9bcc0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:149 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc003b9bcc0}, 0xc003407e30, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:197 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc003b9bcc0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc003b9bcc0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f164c6bba20?, 0xc003b9bcc0?}, {0xc003843700?, 0x7f164c6bba20?, 0xc003b9bcc0?}, {0x6a2e4f7?, 0x2d?}, 0xc003bb7a00?, {0x7233598, 0xc0020644e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.assertFilesExist(...) 
test/e2e/network/dns_common.go:453 | | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) { > assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "") | } | > k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f164c6bba20, 0xc003b9bcc0}, 0xc000cfb3b0, 0xc003ba5b00, {0xc003843700, 0x8, 0x8}) test/e2e/network/dns_common.go:515 | // Try to find results for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet) | | // TODO: probe from the host, too. > k8s.io/kubernetes/test/e2e/network.glob..func2.8({0x7f164c6bba20, 0xc003b9bcc0}) test/e2e/network/dns.go:320 | pod1.Spec.Subdomain = serviceName | > validateDNSResults(ctx, f, pod1, append(wheezyFileNames, jessieFileNames...)) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc003b9bcc0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Automatically polling progress: [sig-network] DNS should provide DNS for pods for Subdomain [Conformance] (Spec Runtime: 11m0.053s) test/e2e/network/dns.go:286 In [It] (Node Runtime: 11m0.032s) test/e2e/network/dns.go:286 At [By Step] looking for the results for each expected name from probers (Step Runtime: 10m47.982s) test/e2e/network/dns_common.go:514 Spec Goroutine goroutine 1740 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc0003d7c80, 0xc003c23100) vendor/golang.org/x/net/http2/transport.go:1269 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000ce15f0, 0xc003c23100, {0x0?}) vendor/golang.org/x/net/http2/transport.go:561 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:513 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc0038bc3c0?}, 0xc003c23100?) vendor/golang.org/x/net/http2/transport.go:3085 net/http.(*Transport).roundTrip(0xc0038bc3c0, 0xc003c23100) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/transport.go:548 net/http.(*Transport).RoundTrip(0x64fc620?, 0xc003ed13b0?) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc0040741e0, 0xc003c23000) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc003c23000, {0x71cb1c0, 0xc0040741e0}, {0x8?, 0x695a000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:252 net/http.(*Client).send(0xc003d42150, 0xc003c23000, {0x100?, 0xc000500000?, 0x0?}) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:176 net/http.(*Client).do(0xc003d42150, 0xc003c23000) /go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:716 net/http.(*Client).Do(...) 
/go/src/k8s.io/kubernetes/_output/local/.gimme/versions/go1.20.2.linux.amd64/src/net/http/client.go:582 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc003fa77a0, {0x71f6f30, 0xc003ed1230}, 0xc0006ba338?) vendor/k8s.io/client-go/rest/request.go:999 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc003fa77a0, {0x71f6f30, 0xc003ed1230}) vendor/k8s.io/client-go/rest/request.go:1039 > k8s.io/kubernetes/test/e2e/network.assertFilesContain.func1({0x7f164c6bba20?, 0xc003b9bcc0?}) test/e2e/network/dns_common.go:472 | Name(pod.Name). | Suffix(fileDir, fileName). > Do(ctx).Raw() | | if err != nil { k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7f164c6bba20?, 0xc003b9bcc0?}, 0x23?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:149 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x7f164c6bba20, 0xc003b9bcc0}, 0xc003407e30, 0x2b97e2a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:197 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7f164c6bba20, 0xc003b9bcc0}, 0x0?, 0x0?, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:182 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7f164c6bba20, 0xc003b9bcc0}, 0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/poll.go:116 > k8s.io/kubernetes/test/e2e/network.assertFilesContain({0x7f164c6bba20?, 0xc003b9bcc0?}, {0xc003843700?, 0x7f164c6bba20?, 0xc003b9bcc0?}, {0x6a2e4f7?, 0x2d?}, 0xc003bb7a00?, {0x7233598, 0xc0020644e0}, ...) test/e2e/network/dns_common.go:459 | var failed []string | > framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { | failed = []string{} | > k8s.io/kubernetes/test/e2e/network.assertFilesExist(...) test/e2e/network/dns_common.go:453 | | func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) { > assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "") | } | > k8s.io/kubernetes/test/e2e/network.validateDNSResults({0x7f164c6bba20, 0xc003b9bcc0}, 0xc000cfb3b0, 0xc003ba5b00, {0xc003843700, 0x8, 0x8}) test/e2e/network/dns_common.go:515 | // Try to find results for each expected name. | ginkgo.By("looking for the results for each expected name from probers") > assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet) | | // TODO: probe from the host, too. 
> k8s.io/kubernetes/test/e2e/network.glob..func2.8({0x7f164c6bba20, 0xc003b9bcc0}) test/e2e/network/dns.go:320 | pod1.Spec.Subdomain = serviceName | > validateDNSResults(ctx, f, pod1, append(wheezyFileNames, jessieFileNames...)) | }) | k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc003b9bcc0}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:452 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841 Mar 14 11:30:42.116: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.201: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.206: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.210: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.214: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.218: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.222: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.225: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.225: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 
11:30:45.228: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.231: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.234: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.237: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.240: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.244: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.246: INFO: Unable to read jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.250: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local from pod dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: the server is currently unable to handle the request (get pods dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058) Mar 14 11:30:45.250: INFO: Lookups using dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local wheezy_udp@dns-test-service-2.dns-3251.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-3251.svc.cluster.local jessie_udp@dns-test-service-2.dns-3251.svc.cluster.local jessie_tcp@dns-test-service-2.dns-3251.svc.cluster.local] Mar 14 11:30:45.250: INFO: Unexpected error: <*errors.errorString | 0xc00022bbb0>: { s: "timed out waiting for the condition", } [FAILED] timed out waiting for the condition In [It] at: test/e2e/network/dns_common.go:459 @ 03/14/23 11:30:45.25 < Exit [It] should provide DNS for pods for Subdomain [Conformance] - test/e2e/network/dns.go:286 @ 03/14/23 11:30:45.25 (11m3.283s) > Enter [AfterEach] [sig-network] DNS - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:30:45.25 Mar 14 11:30:45.250: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-network] DNS - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:30:45.254 (4ms) > Enter [DeferCleanup 
(Each)] [sig-network] DNS - test/e2e/network/dns_common.go:498 @ 03/14/23 11:30:45.254 STEP: deleting the pod - test/e2e/network/dns_common.go:499 @ 03/14/23 11:30:45.254 < Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns_common.go:498 @ 03/14/23 11:30:45.266 (12ms) > Enter [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns.go:298 @ 03/14/23 11:30:45.266 STEP: deleting the test headless service - test/e2e/network/dns.go:299 @ 03/14/23 11:30:45.266 < Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/network/dns.go:298 @ 03/14/23 11:30:45.274 (9ms) > Enter [DeferCleanup (Each)] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:30:45.274 < Exit [DeferCleanup (Each)] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:30:45.274 (0s) > Enter [DeferCleanup (Each)] [sig-network] DNS - dump namespaces | framework.go:209 @ 03/14/23 11:30:45.274 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:30:45.275 STEP: Collecting events from namespace "dns-3251". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:30:45.275 STEP: Found 16 events. - test/e2e/framework/debug/dump.go:46 @ 03/14/23 11:30:45.278 Mar 14 11:30:45.278: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: { } Scheduled: Successfully assigned dns-3251/dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058 to 172.17.0.1 Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:43 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:43 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Created: Created container webserver Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:43 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Started: Started container webserver Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:43 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:43 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Created: Created container querier Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:44 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Started: Started container querier Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:44 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Pulling: Pulling image "registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7" Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:49 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Pulled: Successfully pulled image "registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7" in 5.788945623s (5.788952677s including waiting) Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:49 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Failed: Error: failed to get sandbox container task: no running task found: task 20afe5364abbe77e6448bf875c6a54c60ebfda5f9c3dc9e0e1d9a2163cdca9bb not found: not found Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:50 +0000 UTC - event for 
dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:52 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7" already present on machine Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:52 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Created: Created container jessie-querier Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:52 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} Started: Started container jessie-querier Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:54 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container webserver in pod dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058_dns-3251(8eb00f13-b812-409b-86db-c538dc16418e) Mar 14 11:30:45.278: INFO: At 2023-03-14 11:19:54 +0000 UTC - event for dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container querier in pod dns-test-4a58f6cc-9bc3-4cff-aefd-300028cfa058_dns-3251(8eb00f13-b812-409b-86db-c538dc16418e) Mar 14 11:30:45.281: INFO: POD NODE PHASE GRACE CONDITIONS Mar 14 11:30:45.281: INFO: Mar 14 11:30:45.284: INFO: Logging node info for node 172.17.0.1 Mar 14 11:30:45.287: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 3716 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:30:16 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:30:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:30:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:30:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:30:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 14 11:30:45.287: INFO: Logging kubelet events for node 172.17.0.1 Mar 14 11:30:45.290: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 14 11:30:45.307: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded) Mar 14 11:30:45.307: INFO: Container coredns ready: false, restart count 9 Mar 14 11:30:45.340: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:30:45.34 (65ms) < Exit [DeferCleanup (Each)] [sig-network] DNS - dump namespaces | framework.go:209 @ 03/14/23 
11:30:45.34 (65ms) > Enter [DeferCleanup (Each)] [sig-network] DNS - tear down framework | framework.go:206 @ 03/14/23 11:30:45.34 STEP: Destroying namespace "dns-3251" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:30:45.34 < Exit [DeferCleanup (Each)] [sig-network] DNS - tear down framework | framework.go:206 @ 03/14/23 11:30:45.346 (6ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:30:45.346 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:30:45.346 (0s)
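The stack traces above show where the test spent its 11 minutes: assertFilesContain wraps each round of per-name reads in wait.PollImmediateWithContext with a 5-second interval and a 600-second timeout, and the final "timed out waiting for the condition" is that timeout expiring. A minimal sketch of the same polling pattern follows, assuming only the k8s.io/apimachinery module; checkResults is a hypothetical stand-in for the real probe body, not the test's code.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// checkResults stands in for the condition polled by assertFilesContain:
// the real one re-reads every expected result file through the pod proxy
// and returns true only once none of the lookups are missing.
func checkResults(ctx context.Context) (bool, error) {
	return false, nil // not done yet; keep polling
}

func main() {
	// Interval and timeout match the call visible in the trace:
	// wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, ...)
	err := wait.PollImmediateWithContext(context.Background(),
		5*time.Second, 600*time.Second, checkResults)
	// If the condition never returns true, the error carries exactly the
	// message logged above: "timed out waiting for the condition".
	fmt.Println(err)
}

Because every probe here returned the apiserver's "the server is currently unable to handle the request" (consistent with the coredns container shown not ready with restart count 9), the condition never became true and the poll ran to its full 600 seconds.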
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sServices\sshould\sbe\sable\sto\schange\sthe\stype\sfrom\sClusterIP\sto\sExternalName\s\[Conformance\]$'
[FAILED] Expected Service externalsvc to be running: 2 containers failed which is more than allowed 0 In [It] at: test/e2e/network/service.go:4066 @ 03/14/23 11:58:18.13 (from junit_01.xml)
> Enter [BeforeEach] [sig-network] Services - set up framework | framework.go:191 @ 03/14/23 11:58:08.97 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:58:08.97 Mar 14 11:58:08.970: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename services - test/e2e/framework/framework.go:250 @ 03/14/23 11:58:08.971 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:58:08.987 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:58:08.993 < Exit [BeforeEach] [sig-network] Services - set up framework | framework.go:191 @ 03/14/23 11:58:08.999 (29ms) > Enter [BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:58:08.999 < Exit [BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:58:08.999 (0s) > Enter [BeforeEach] [sig-network] Services - test/e2e/network/service.go:764 @ 03/14/23 11:58:08.999 < Exit [BeforeEach] [sig-network] Services - test/e2e/network/service.go:764 @ 03/14/23 11:58:08.999 (0s) > Enter [It] should be able to change the type from ClusterIP to ExternalName [Conformance] - test/e2e/network/service.go:1493 @ 03/14/23 11:58:08.999 STEP: creating a service clusterip-service with the type=ClusterIP in namespace services-2923 - test/e2e/network/service.go:1498 @ 03/14/23 11:58:08.999 STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service - test/e2e/network/service.go:1507 @ 03/14/23 11:58:09.009 STEP: creating service externalsvc in namespace services-2923 - test/e2e/network/service.go:270 @ 03/14/23 11:58:09.009 STEP: creating replication controller externalsvc in namespace services-2923 - test/e2e/framework/rc/rc_utils.go:88 @ 03/14/23 11:58:09.021 I0314 11:58:09.028430 147366 runners.go:194] Created replication controller with name: externalsvc, namespace: services-2923, replica count: 2 I0314 11:58:12.079686 147366 runners.go:194] externalsvc Pods: 2 out of 2 created, 1 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0314 11:58:15.080491 147366 runners.go:194] externalsvc Pods: 2 out of 2 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 2 runningButNotReady I0314 11:58:18.080944 147366 runners.go:194] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0314 11:58:18.080963 147366 runners.go:194] Logging node info for node 172.17.0.1 I0314 11:58:18.084565 147366 runners.go:194] Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 8930 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:56:16 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 
registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} I0314 11:58:18.084797 147366 runners.go:194] Logging kubelet events for node 172.17.0.1 I0314 11:58:18.087802 147366 runners.go:194] Logging pods the kubelet thinks is on node 172.17.0.1 I0314 11:58:18.093717 147366 runners.go:194] externalsvc-8knph started at 2023-03-14 11:58:09 +0000 UTC (0+1 container statuses recorded) I0314 11:58:18.093748 147366 runners.go:194] Container externalsvc ready: true, restart count 1 I0314 11:58:18.093754 147366 runners.go:194] coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded) I0314 11:58:18.093760 147366 runners.go:194] Container coredns ready: false, restart count 14 I0314 11:58:18.093764 147366 runners.go:194] externalsvc-j5wlm started at 2023-03-14 11:58:09 +0000 UTC (0+1 container statuses recorded) I0314 11:58:18.093769 147366 runners.go:194] Container externalsvc ready: true, restart count 1 I0314 11:58:18.126841 147366 runners.go:194] Latency metrics for node 172.17.0.1 I0314 11:58:18.130529 147366 runners.go:194] Running kubectl logs on non-ready containers in services-2923 Mar 14 11:58:18.130: INFO: Unexpected error: Expected Service externalsvc to be running: <*errors.errorString | 0xc0011e37d0>: { s: "2 containers failed which is more than allowed 0", } [FAILED] Expected Service externalsvc to be running: 2 containers failed which is more than allowed 0 In [It] at: test/e2e/network/service.go:4066 @ 03/14/23 11:58:18.13 < Exit [It] should be able to change the type from ClusterIP to ExternalName [Conformance] - test/e2e/network/service.go:1493 @ 03/14/23 11:58:18.13 (9.131s) > Enter [AfterEach] [sig-network] Services - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:58:18.13 Mar 14 11:58:18.130: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-network] Services - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:58:18.134 (3ms) > Enter [DeferCleanup (Each)] [sig-network] Services - test/e2e/network/service.go:1501 @ 03/14/23 11:58:18.134 Mar 14 11:58:18.134: INFO: Cleaning up the ClusterIP to ExternalName test service < Exit [DeferCleanup (Each)] [sig-network] Services - test/e2e/network/service.go:1501 @ 03/14/23 11:58:18.145 (11ms) > Enter [DeferCleanup (Each)] [sig-network] Services - test/e2e/framework/metrics/init/init.go:35 
@ 03/14/23 11:58:18.145 < Exit [DeferCleanup (Each)] [sig-network] Services - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:58:18.145 (0s) > Enter [DeferCleanup (Each)] [sig-network] Services - dump namespaces | framework.go:209 @ 03/14/23 11:58:18.145 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:58:18.145 STEP: Collecting events from namespace "services-2923". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:58:18.145 STEP: Found 12 events. - test/e2e/framework/debug/dump.go:46 @ 03/14/23 11:58:18.148 Mar 14 11:58:18.148: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for externalsvc-8knph: { } Scheduled: Successfully assigned services-2923/externalsvc-8knph to 172.17.0.1 Mar 14 11:58:18.148: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for externalsvc-j5wlm: { } Scheduled: Successfully assigned services-2923/externalsvc-j5wlm to 172.17.0.1 Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:09 +0000 UTC - event for externalsvc: {replication-controller } SuccessfulCreate: Created pod: externalsvc-j5wlm Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:09 +0000 UTC - event for externalsvc: {replication-controller } SuccessfulCreate: Created pod: externalsvc-8knph Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:11 +0000 UTC - event for externalsvc-8knph: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:11 +0000 UTC - event for externalsvc-8knph: {kubelet 172.17.0.1} Created: Created container externalsvc Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:11 +0000 UTC - event for externalsvc-8knph: {kubelet 172.17.0.1} Started: Started container externalsvc Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:11 +0000 UTC - event for externalsvc-j5wlm: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:11 +0000 UTC - event for externalsvc-j5wlm: {kubelet 172.17.0.1} Created: Created container externalsvc Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:11 +0000 UTC - event for externalsvc-j5wlm: {kubelet 172.17.0.1} Started: Started container externalsvc Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:14 +0000 UTC - event for externalsvc-8knph: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Mar 14 11:58:18.148: INFO: At 2023-03-14 11:58:14 +0000 UTC - event for externalsvc-j5wlm: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Mar 14 11:58:18.151: INFO: POD NODE PHASE GRACE CONDITIONS Mar 14 11:58:18.151: INFO: externalsvc-8knph 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:58:09 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:58:17 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:58:17 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:58:09 +0000 UTC }] Mar 14 11:58:18.151: INFO: externalsvc-j5wlm 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:58:09 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:58:17 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:58:17 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:58:09 +0000 UTC }] Mar 14 11:58:18.151: INFO: Mar 14 11:58:18.171: INFO: Logging node info for node 172.17.0.1 Mar 14 11:58:18.175: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 8930 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:56:16 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 
UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 14 11:58:18.175: INFO: Logging kubelet events for node 172.17.0.1 Mar 14 11:58:18.184: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 14 11:58:18.191: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded) Mar 14 11:58:18.191: INFO: Container coredns ready: false, restart count 14 Mar 14 11:58:18.191: INFO: externalsvc-j5wlm started at 2023-03-14 11:58:09 +0000 UTC (0+1 container statuses recorded) Mar 14 11:58:18.191: INFO: Container externalsvc ready: true, restart count 1 Mar 14 11:58:18.191: INFO: externalsvc-8knph started at 2023-03-14 11:58:09 +0000 UTC (0+1 container statuses recorded) Mar 14 11:58:18.191: INFO: Container externalsvc ready: false, restart count 1 Mar 14 11:58:18.229: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after 
failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:58:18.229 (84ms) < Exit [DeferCleanup (Each)] [sig-network] Services - dump namespaces | framework.go:209 @ 03/14/23 11:58:18.229 (84ms) > Enter [DeferCleanup (Each)] [sig-network] Services - tear down framework | framework.go:206 @ 03/14/23 11:58:18.229 STEP: Destroying namespace "services-2923" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:58:18.229 < Exit [DeferCleanup (Each)] [sig-network] Services - tear down framework | framework.go:206 @ 03/14/23 11:58:18.238 (9ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:58:18.238 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:58:18.238 (0s)
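Note that the failure here is not that the externalsvc pods never ran: both ended up Running and Ready, but each container shows restart count 1 after a SandboxChanged event, and the replication-controller runner tolerates zero failed containers. Below is a rough client-go sketch of that kind of restart-count check; the namespace is taken from this run and the label selector is an assumption about how the e2e RC labels its pods, so both are illustrative only.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path mirrors the one the test harness logs above.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Assumed selector: the e2e RC utilities label replicas by name.
	pods, err := cs.CoreV1().Pods("services-2923").List(context.Background(),
		metav1.ListOptions{LabelSelector: "name=externalsvc"})
	if err != nil {
		panic(err)
	}

	// Sum restart counts across all containers; any nonzero total would
	// trip a zero-tolerance check like the one that failed above.
	var restarts int32
	for _, p := range pods.Items {
		for _, s := range p.Status.ContainerStatuses {
			restarts += s.RestartCount
		}
	}
	fmt.Printf("total container restarts: %d (allowed: 0)\n", restarts)
}

The same sandbox churn shows up on the node dump (coredns ready: false, restart count 14), which fits the pattern of "Pod sandbox changed, it will be killed and re-created" events driving the restarts.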
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sServices\sshould\sbe\sable\sto\screate\sa\sfunctioning\sNodePort\sservice\s\[Conformance\]$'
[FAILED] service is not reachable within 2m0s timeout on endpoint nodeport-test:80 over TCP protocol In [It] at: test/e2e/network/service.go:1298 @ 03/14/23 11:55:28.179 (from junit_01.xml)
> Enter [BeforeEach] [sig-network] Services - set up framework | framework.go:191 @ 03/14/23 11:53:19.664 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:53:19.664 Mar 14 11:53:19.665: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename services - test/e2e/framework/framework.go:250 @ 03/14/23 11:53:19.666 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:53:19.682 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:53:19.687 < Exit [BeforeEach] [sig-network] Services - set up framework | framework.go:191 @ 03/14/23 11:53:19.692 (28ms) > Enter [BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:53:19.692 < Exit [BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:53:19.692 (0s) > Enter [BeforeEach] [sig-network] Services - test/e2e/network/service.go:764 @ 03/14/23 11:53:19.692 < Exit [BeforeEach] [sig-network] Services - test/e2e/network/service.go:764 @ 03/14/23 11:53:19.692 (0s) > Enter [It] should be able to create a functioning NodePort service [Conformance] - test/e2e/network/service.go:1280 @ 03/14/23 11:53:19.692 STEP: creating service nodeport-test with type=NodePort in namespace services-773 - test/e2e/network/service.go:1286 @ 03/14/23 11:53:19.692 STEP: creating replication controller nodeport-test in namespace services-773 - test/e2e/framework/rc/rc_utils.go:88 @ 03/14/23 11:53:19.708 I0314 11:53:19.714342 147366 runners.go:194] Created replication controller with name: nodeport-test, namespace: services-773, replica count: 2 I0314 11:53:22.765675 147366 runners.go:194] nodeport-test Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady Mar 14 11:53:22.765: INFO: Creating new exec pod Mar 14 11:53:27.789: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:53:27.909: INFO: rc: 1 Mar 14 11:53:27.909: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:53:28.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:53:29.014: INFO: rc: 1 Mar 14 11:53:29.014: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
[retry loop condensed: the identical kubectl exec probe repeats roughly once per second from 11:53:29.910 through 11:54:59.018, and every attempt fails with rc: 1 and stderr:
error: unable to upgrade connection: container not found ("agnhost-container")
Three attempts (11:53:39.126, 11:54:03.127, 11:54:45.124) instead returned rc: 137; in those cases the shell did start before the command was terminated:
+ echo hostName
+ nc -v -t -w 2 nodeport-test 80
command terminated with exit code 137
error: exit status 137
Retrying...]
Mar 14 11:54:59.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:00.035: INFO: rc: 1 Mar 14 11:55:00.035: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:00.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:01.009: INFO: rc: 1 Mar 14 11:55:01.009: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:01.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:02.022: INFO: rc: 1 Mar 14 11:55:02.022: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:02.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:03.010: INFO: rc: 1 Mar 14 11:55:03.011: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:55:03.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:04.034: INFO: rc: 1 Mar 14 11:55:04.034: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:04.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:05.011: INFO: rc: 1 Mar 14 11:55:05.011: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:05.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:06.025: INFO: rc: 1 Mar 14 11:55:06.025: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:06.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:07.025: INFO: rc: 1 Mar 14 11:55:07.025: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:55:07.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:08.054: INFO: rc: 1 Mar 14 11:55:08.054: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:08.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:09.027: INFO: rc: 1 Mar 14 11:55:09.027: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:09.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:10.034: INFO: rc: 1 Mar 14 11:55:10.034: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:10.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:11.025: INFO: rc: 1 Mar 14 11:55:11.025: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:55:11.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:12.021: INFO: rc: 1 Mar 14 11:55:12.021: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:12.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:13.012: INFO: rc: 1 Mar 14 11:55:13.012: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:13.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:14.027: INFO: rc: 1 Mar 14 11:55:14.027: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:14.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:15.011: INFO: rc: 1 Mar 14 11:55:15.011: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:55:15.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:16.010: INFO: rc: 1 Mar 14 11:55:16.010: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:16.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:17.014: INFO: rc: 1 Mar 14 11:55:17.014: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:17.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:18.023: INFO: rc: 1 Mar 14 11:55:18.023: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:18.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:19.011: INFO: rc: 1 Mar 14 11:55:19.011: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:55:19.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:20.011: INFO: rc: 1 Mar 14 11:55:20.011: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:20.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:21.019: INFO: rc: 1 Mar 14 11:55:21.019: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:21.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:22.015: INFO: rc: 1 Mar 14 11:55:22.015: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:22.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:23.007: INFO: rc: 1 Mar 14 11:55:23.007: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:55:23.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:24.016: INFO: rc: 1 Mar 14 11:55:24.016: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:24.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:25.009: INFO: rc: 1 Mar 14 11:55:25.009: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:25.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:26.014: INFO: rc: 1 Mar 14 11:55:26.014: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:26.910: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:27.025: INFO: rc: 1 Mar 14 11:55:27.025: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:55:27.909: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:28.040: INFO: rc: 1 Mar 14 11:55:28.040: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:28.040: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' Mar 14 11:55:28.178: INFO: rc: 1 Mar 14 11:55:28.178: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-773 exec execpodmjlkz -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:55:28.178: INFO: Unexpected error: <*errors.errorString | 0xc00468b840>: { s: "service is not reachable within 2m0s timeout on endpoint nodeport-test:80 over TCP protocol", } [FAILED] service is not reachable within 2m0s timeout on endpoint nodeport-test:80 over TCP protocol In [It] at: test/e2e/network/service.go:1298 @ 03/14/23 11:55:28.179 < Exit [It] should be able to create a functioning NodePort service [Conformance] - test/e2e/network/service.go:1280 @ 03/14/23 11:55:28.179 (2m8.487s) > Enter [AfterEach] [sig-network] Services - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:55:28.179 Mar 14 11:55:28.179: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready < Exit [AfterEach] [sig-network] Services - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:55:28.192 (13ms) > Enter [DeferCleanup (Each)] [sig-network] Services - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:55:28.192 < Exit [DeferCleanup (Each)] [sig-network] Services - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:55:28.192 (0s) > Enter [DeferCleanup (Each)] [sig-network] Services - dump namespaces | framework.go:209 @ 03/14/23 11:55:28.192 STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:55:28.192 STEP: Collecting events from namespace "services-773". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:55:28.192 STEP: Found 21 events. 
Mar 14 11:55:28.196: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for execpodmjlkz: { } Scheduled: Successfully assigned services-773/execpodmjlkz to 172.17.0.1
Mar 14 11:55:28.196: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for nodeport-test-dxfbt: { } Scheduled: Successfully assigned services-773/nodeport-test-dxfbt to 172.17.0.1
Mar 14 11:55:28.196: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for nodeport-test-x5wxk: { } Scheduled: Successfully assigned services-773/nodeport-test-x5wxk to 172.17.0.1
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:19 +0000 UTC - event for nodeport-test: {replication-controller } SuccessfulCreate: Created pod: nodeport-test-x5wxk
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:19 +0000 UTC - event for nodeport-test: {replication-controller } SuccessfulCreate: Created pod: nodeport-test-dxfbt
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:22 +0000 UTC - event for nodeport-test-dxfbt: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:22 +0000 UTC - event for nodeport-test-dxfbt: {kubelet 172.17.0.1} Started: Started container nodeport-test
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:22 +0000 UTC - event for nodeport-test-dxfbt: {kubelet 172.17.0.1} Created: Created container nodeport-test
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:22 +0000 UTC - event for nodeport-test-x5wxk: {kubelet 172.17.0.1} Created: Created container nodeport-test
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:22 +0000 UTC - event for nodeport-test-x5wxk: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:22 +0000 UTC - event for nodeport-test-x5wxk: {kubelet 172.17.0.1} Started: Started container nodeport-test
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:23 +0000 UTC - event for nodeport-test-dxfbt: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:23 +0000 UTC - event for nodeport-test-x5wxk: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:24 +0000 UTC - event for execpodmjlkz: {kubelet 172.17.0.1} Created: Created container agnhost-container
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:24 +0000 UTC - event for execpodmjlkz: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:25 +0000 UTC - event for execpodmjlkz: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:25 +0000 UTC - event for execpodmjlkz: {kubelet 172.17.0.1} Started: Started container agnhost-container
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:27 +0000 UTC - event for execpodmjlkz: {kubelet 172.17.0.1} Failed: Error: failed to start containerd task "agnhost-container": OCI runtime start failed: container process is already dead: unknown
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:29 +0000 UTC - event for execpodmjlkz: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container agnhost-container in pod execpodmjlkz_services-773(2319534b-d842-49c2-a6a9-876f668c01d9)
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:29 +0000 UTC - event for nodeport-test-dxfbt: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container nodeport-test in pod nodeport-test-dxfbt_services-773(b12aea69-0d6d-41d9-a09e-d2b1fe4cc8ca)
Mar 14 11:55:28.196: INFO: At 2023-03-14 11:53:29 +0000 UTC - event for nodeport-test-x5wxk: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container nodeport-test in pod nodeport-test-x5wxk_services-773(c593edc3-7fc3-4e57-89e9-598a227406ec)
Mar 14 11:55:28.199: INFO: POD NODE PHASE GRACE CONDITIONS
Mar 14 11:55:28.199: INFO: execpodmjlkz 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:22 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:54:45 +0000 UTC ContainersNotReady containers with unready status: [agnhost-container]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:54:45 +0000 UTC ContainersNotReady containers with unready status: [agnhost-container]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:22 +0000 UTC }]
Mar 14 11:55:28.199: INFO: nodeport-test-dxfbt 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:54:47 +0000 UTC ContainersNotReady containers with unready status: [nodeport-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:54:47 +0000 UTC ContainersNotReady containers with unready status: [nodeport-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:19 +0000 UTC }]
Mar 14 11:55:28.199: INFO: nodeport-test-x5wxk 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:54:47 +0000 UTC ContainersNotReady containers with unready status: [nodeport-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:54:47 +0000 UTC ContainersNotReady containers with unready status: [nodeport-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:53:19 +0000 UTC }]
Mar 14 11:55:28.199: INFO:
Mar 14 11:55:28.231: INFO: Logging node info for node 172.17.0.1
Mar 14 11:55:28.234: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 7397 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:51:10 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:51:10 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:51:10 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:51:10 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:51:10 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 
registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}
Mar 14 11:55:28.235: INFO: Logging kubelet events for node 172.17.0.1
Mar 14 11:55:28.237: INFO: Logging pods the kubelet thinks is on node 172.17.0.1
Mar 14 11:55:28.242: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:55:28.243: INFO: Container coredns ready: false, restart count 14
Mar 14 11:55:28.243: INFO: nodeport-test-x5wxk started at 2023-03-14 11:53:19 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:55:28.243: INFO: Container nodeport-test ready: false, restart count 4
Mar 14 11:55:28.243: INFO: execpodmjlkz started at 2023-03-14 11:53:22 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:55:28.243: INFO: Container agnhost-container ready: false, restart count 4
Mar 14 11:55:28.243: INFO: nodeport-test-dxfbt started at 2023-03-14 11:53:19 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:55:28.243: INFO: Container nodeport-test ready: false, restart count 4
Mar 14 11:55:28.281: INFO: Latency metrics for node 172.17.0.1
END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:55:28.281 (90ms)
< Exit [DeferCleanup (Each)] [sig-network] Services - dump namespaces | framework.go:209 @ 03/14/23 11:55:28.281 (90ms)
> Enter [DeferCleanup (Each)] [sig-network] Services - tear down framework | framework.go:206 @ 03/14/23 11:55:28.281
STEP: Destroying namespace "services-773" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:55:28.281
< Exit [DeferCleanup (Each)] [sig-network] Services - tear down framework | framework.go:206 @ 03/14/23 11:55:28.287 (6ms)
> Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:55:28.287
< Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:55:28.287 (0s)
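The retry loop in the failure above is the e2e framework's service-reachability probe: exec into a client pod and attempt a TCP connection to the service with netcat, roughly once per second, until a 2m0s deadline. Below is a minimal standalone sketch of that pattern (not the framework's actual code), shelling out to kubectl the same way the test does; the namespace, pod, and service names are the ones from this run, and a kubectl on PATH with a current kubeconfig stands in for the freshly built _output/bin/kubectl:

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// Probe nodeport-test:80 from inside the exec pod, retrying about once per
// second until a two-minute deadline, mirroring the loop in the log above.
func main() {
	deadline := time.Now().Add(2 * time.Minute)
	for time.Now().Before(deadline) {
		// The same shell pipeline the test runs: write one line to the
		// service port and let nc give up after 2 seconds.
		cmd := exec.Command("kubectl",
			"--namespace", "services-773",
			"exec", "execpodmjlkz", "--",
			"/bin/sh", "-x", "-c",
			"echo hostName | nc -v -t -w 2 nodeport-test 80")
		out, err := cmd.CombinedOutput()
		if err == nil {
			fmt.Printf("service reachable:\n%s", out)
			return
		}
		fmt.Printf("probe failed: %v\n%s\nRetrying...\n", err, out)
		time.Sleep(1 * time.Second)
	}
	fmt.Println("service is not reachable within 2m0s timeout on endpoint nodeport-test:80")
}

Note that in this run the probe never reached the network path at all: nearly every attempt died inside kubectl exec with container not found ("agnhost-container"), because the client pod's own container was crash-looping (restart count 4, with the SandboxChanged/BackOff events in the dump above); the lone rc: 137 attempt is the one exec that started before its container was killed.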
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sServices\sshould\shave\ssession\saffinity\swork\sfor\sNodePort\sservice\s\[LinuxOnly\]\s\[Conformance\]$'
[FAILED] failed to create replication controller with service in the namespace: services-1068: 3 containers failed which is more than allowed 0
In [It] at: test/e2e/network/service.go:3967 @ 03/14/23 11:19:10.565
from junit_01.xml
> Enter [BeforeEach] [sig-network] Services - set up framework | framework.go:191 @ 03/14/23 11:19:01.401
STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:19:01.401
Mar 14 11:19:01.401: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename services - test/e2e/framework/framework.go:250 @ 03/14/23 11:19:01.402
STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:19:01.417
STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:19:01.421
< Exit [BeforeEach] [sig-network] Services - set up framework | framework.go:191 @ 03/14/23 11:19:01.426 (25ms)
> Enter [BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:19:01.426
< Exit [BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:19:01.426 (0s)
> Enter [BeforeEach] [sig-network] Services - test/e2e/network/service.go:764 @ 03/14/23 11:19:01.426
< Exit [BeforeEach] [sig-network] Services - test/e2e/network/service.go:764 @ 03/14/23 11:19:01.426 (0s)
> Enter [It] should have session affinity work for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2202 @ 03/14/23 11:19:01.426
STEP: creating service in namespace services-1068 - test/e2e/network/service.go:3963 @ 03/14/23 11:19:01.426
STEP: creating service affinity-nodeport in namespace services-1068 - test/e2e/network/service.go:270 @ 03/14/23 11:19:01.426
STEP: creating replication controller affinity-nodeport in namespace services-1068 - test/e2e/framework/rc/rc_utils.go:88 @ 03/14/23 11:19:01.436
I0314 11:19:01.443511 147366 runners.go:194] Created replication controller with name: affinity-nodeport, namespace: services-1068, replica count: 3
I0314 11:19:04.495822 147366 runners.go:194] affinity-nodeport Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
I0314 11:19:07.495967 147366 runners.go:194] affinity-nodeport Pods: 3 out of 3 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 3 runningButNotReady
I0314 11:19:10.496925 147366 runners.go:194] affinity-nodeport Pods: 3 out of 3 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 3 runningButNotReady
I0314 11:19:10.496942 147366 runners.go:194] Logging node info for node 172.17.0.1
I0314 11:19:10.500908 147366 runners.go:194] Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 2143 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:17:53 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 
registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}
I0314 11:19:10.501155 147366 runners.go:194] Logging kubelet events for node 172.17.0.1
I0314 11:19:10.504554 147366 runners.go:194] Logging pods the kubelet thinks is on node 172.17.0.1
I0314 11:19:10.512047 147366 runners.go:194] affinity-nodeport-ltbfb started at 2023-03-14 11:19:01 +0000 UTC (0+1 container statuses recorded)
I0314 11:19:10.512067 147366 runners.go:194] Container affinity-nodeport ready: false, restart count 1
I0314 11:19:10.512071 147366 runners.go:194] affinity-nodeport-ncltn started at 2023-03-14 11:19:01 +0000 UTC (0+1 container statuses recorded)
I0314 11:19:10.512074 147366 runners.go:194] Container affinity-nodeport ready: false, restart count 1
I0314 11:19:10.512077 147366 runners.go:194] coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded)
I0314 11:19:10.512083 147366 runners.go:194] Container coredns ready: false, restart count 7
I0314 11:19:10.512086 147366 runners.go:194] affinity-nodeport-xph6g started at 2023-03-14 11:19:01 +0000 UTC (0+1 container statuses recorded)
I0314 11:19:10.512089 147366 runners.go:194] Container affinity-nodeport ready: false, restart count 1
I0314 11:19:10.541590 147366 runners.go:194] Latency metrics for node 172.17.0.1
I0314 11:19:10.545519 147366 runners.go:194] Running kubectl logs on non-ready containers in services-1068
Mar 14 11:19:10.551: INFO: Logs of services-1068/affinity-nodeport-ltbfb:affinity-nodeport on node 172.17.0.1
Mar 14 11:19:10.551: INFO: : STARTLOG
I0314 11:19:08.202949 1 log.go:198] Serving on port 9376.
ENDLOG for container services-1068:affinity-nodeport-ltbfb:affinity-nodeport
Mar 14 11:19:10.558: INFO: Logs of services-1068/affinity-nodeport-ncltn:affinity-nodeport on node 172.17.0.1
Mar 14 11:19:10.558: INFO: : STARTLOG
I0314 11:19:07.746547 1 log.go:198] Serving on port 9376.
ENDLOG for container services-1068:affinity-nodeport-ncltn:affinity-nodeport
Mar 14 11:19:10.564: INFO: Logs of services-1068/affinity-nodeport-xph6g:affinity-nodeport on node 172.17.0.1
Mar 14 11:19:10.564: INFO: : STARTLOG
I0314 11:19:08.069485 1 log.go:198] Serving on port 9376.
ENDLOG for container services-1068:affinity-nodeport-xph6g:affinity-nodeport
Mar 14 11:19:10.564: INFO: Unexpected error: failed to create replication controller with service in the namespace: services-1068:
    <*errors.errorString | 0xc00127f990>: {
        s: "3 containers failed which is more than allowed 0",
    }
[FAILED] failed to create replication controller with service in the namespace: services-1068: 3 containers failed which is more than allowed 0
In [It] at: test/e2e/network/service.go:3967 @ 03/14/23 11:19:10.565
< Exit [It] should have session affinity work for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2202 @ 03/14/23 11:19:10.565 (9.139s)
> Enter [AfterEach] [sig-network] Services - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:19:10.565
Mar 14 11:19:10.565: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
< Exit [AfterEach] [sig-network] Services - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:19:10.568 (3ms)
> Enter [DeferCleanup (Each)] [sig-network] Services - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:19:10.568
< Exit [DeferCleanup (Each)] [sig-network] Services - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:19:10.568 (0s)
> Enter [DeferCleanup (Each)] [sig-network] Services - dump namespaces | framework.go:209 @ 03/14/23 11:19:10.568
STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:19:10.568
STEP: Collecting events from namespace "services-1068". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:19:10.568
STEP: Found 18 events. - test/e2e/framework/debug/dump.go:46 @ 03/14/23 11:19:10.572
Mar 14 11:19:10.572: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for affinity-nodeport-ltbfb: { } Scheduled: Successfully assigned services-1068/affinity-nodeport-ltbfb to 172.17.0.1
Mar 14 11:19:10.572: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for affinity-nodeport-ncltn: { } Scheduled: Successfully assigned services-1068/affinity-nodeport-ncltn to 172.17.0.1
Mar 14 11:19:10.572: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for affinity-nodeport-xph6g: { } Scheduled: Successfully assigned services-1068/affinity-nodeport-xph6g to 172.17.0.1
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:01 +0000 UTC - event for affinity-nodeport: {replication-controller } SuccessfulCreate: Created pod: affinity-nodeport-ltbfb
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:01 +0000 UTC - event for affinity-nodeport: {replication-controller } SuccessfulCreate: Created pod: affinity-nodeport-xph6g
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:01 +0000 UTC - event for affinity-nodeport: {replication-controller } SuccessfulCreate: Created pod: affinity-nodeport-ncltn
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:03 +0000 UTC - event for affinity-nodeport-ltbfb: {kubelet 172.17.0.1} Started: Started container affinity-nodeport
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:03 +0000 UTC - event for affinity-nodeport-ltbfb: {kubelet 172.17.0.1} Created: Created container affinity-nodeport
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:03 +0000 UTC - event for affinity-nodeport-ltbfb: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:03 +0000 UTC - event for affinity-nodeport-ncltn: {kubelet 172.17.0.1} Created: Created container affinity-nodeport
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:03 +0000 UTC - event for affinity-nodeport-ncltn: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:03 +0000 UTC - event for affinity-nodeport-xph6g: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:03 +0000 UTC - event for affinity-nodeport-xph6g: {kubelet 172.17.0.1} Created: Created container affinity-nodeport
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:03 +0000 UTC - event for affinity-nodeport-xph6g: {kubelet 172.17.0.1} Started: Started container affinity-nodeport
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:04 +0000 UTC - event for affinity-nodeport-ncltn: {kubelet 172.17.0.1} Started: Started container affinity-nodeport
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:05 +0000 UTC - event for affinity-nodeport-ltbfb: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:05 +0000 UTC - event for affinity-nodeport-ncltn: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:19:10.572: INFO: At 2023-03-14 11:19:05 +0000 UTC - event for affinity-nodeport-xph6g: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:19:10.575: INFO: POD NODE PHASE GRACE CONDITIONS
Mar 14 11:19:10.575: INFO: affinity-nodeport-ltbfb 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:01 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:10 +0000 UTC ContainersNotReady containers with unready status: [affinity-nodeport]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:10 +0000 UTC ContainersNotReady containers with unready status: [affinity-nodeport]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:01 +0000 UTC }]
Mar 14 11:19:10.575: INFO: affinity-nodeport-ncltn 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:01 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:10 +0000 UTC ContainersNotReady containers with unready status: [affinity-nodeport]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:10 +0000 UTC ContainersNotReady containers with unready status: [affinity-nodeport]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:01 +0000 UTC }]
Mar 14 11:19:10.575: INFO: affinity-nodeport-xph6g 172.17.0.1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:01 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:10 +0000 UTC ContainersNotReady containers with unready status: [affinity-nodeport]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:10 +0000 UTC ContainersNotReady containers with unready status: [affinity-nodeport]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:19:01 +0000 UTC }]
Mar 14 11:19:10.575: INFO:
Mar 14 11:19:10.602: INFO: Logging node info for node 172.17.0.1
Mar 14 11:19:10.605: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 2143 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update 
v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:17:53 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:17:53 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Mar 14 11:19:10.605: INFO: Logging kubelet events for node 172.17.0.1 Mar 14 11:19:10.609: INFO: Logging pods the kubelet thinks is on node 172.17.0.1 Mar 14 11:19:10.615: INFO: affinity-nodeport-ltbfb started at 2023-03-14 11:19:01 +0000 UTC (0+1 container statuses recorded) Mar 14 11:19:10.615: INFO: Container affinity-nodeport ready: false, restart count 1 Mar 14 11:19:10.615: INFO: affinity-nodeport-ncltn started at 2023-03-14 11:19:01 +0000 UTC (0+1 container statuses recorded) Mar 14 11:19:10.615: INFO: Container affinity-nodeport ready: false, restart count 1 Mar 14 11:19:10.615: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded) Mar 14 11:19:10.615: INFO: Container coredns ready: false, restart count 7 Mar 14 11:19:10.615: INFO: affinity-nodeport-xph6g started at 2023-03-14 11:19:01 +0000 UTC (0+1 container statuses recorded) Mar 14 11:19:10.615: INFO: Container affinity-nodeport ready: false, restart count 1 Mar 14 11:19:10.646: INFO: Latency metrics for node 172.17.0.1 END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:19:10.646 (78ms) < Exit [DeferCleanup (Each)] [sig-network] Services - dump namespaces | framework.go:209 @ 03/14/23 11:19:10.646 (78ms) > Enter [DeferCleanup (Each)] [sig-network] Services - tear down framework | framework.go:206 @ 03/14/23 11:19:10.646 STEP: Destroying namespace "services-1068" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:19:10.646 < Exit [DeferCleanup (Each)] [sig-network] Services - tear down framework | framework.go:206 @ 03/14/23 11:19:10.656 (10ms) > Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:19:10.656 < Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:19:10.656 (0s)
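The namespace dump above ("Collecting events from namespace …") is produced by the framework's debug helpers, but the same event listing is easy to reproduce directly when triaging a failure like this. A minimal client-go sketch — the kubeconfig path and namespace are taken from the log; everything else is illustrative, not the framework's code:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path as seen in the log (">>> kubeConfig: /workspace/.kube/config").
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Mirror the "Collecting events from namespace" step in the dump above.
	evs, err := cs.CoreV1().Events("services-1068").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, e := range evs.Items {
		fmt.Printf("%s %s/%s %s: %s\n",
			e.LastTimestamp.Format("15:04:05"),
			e.InvolvedObject.Kind, e.InvolvedObject.Name,
			e.Reason, e.Message)
	}
}
```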
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sServices\sshould\sserve\sa\sbasic\sendpoint\sfrom\spods\s\s\[Conformance\]$'
[FAILED] service is not reachable within 2m0s timeout on endpoint endpoint-test2:80 over TCP protocol In [It] at: test/e2e/network/service.go:818 @ 03/14/23 11:58:08.798 (from junit_01.xml)
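Before probing, the test waits for the Service's Endpoints object to expose pod1 on port 80 (see the setup steps in the log below). A minimal client-go sketch of that endpoint check — service name, namespace, and kubeconfig path come from the log; the helper itself is illustrative:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Fetch the Endpoints object kube-controller-manager maintains for the
	// Service; the test expects it to expose map[pod1:[80]].
	ep, err := cs.CoreV1().Endpoints("services-8400").Get(context.TODO(), "endpoint-test2", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, ss := range ep.Subsets {
		for _, addr := range ss.Addresses {
			for _, p := range ss.Ports {
				fmt.Printf("ready endpoint: %s:%d\n", addr.IP, p.Port)
			}
		}
	}
}
```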
> Enter [BeforeEach] [sig-network] Services - set up framework | framework.go:191 @ 03/14/23 11:55:28.326 STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:55:28.326 Mar 14 11:55:28.326: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename services - test/e2e/framework/framework.go:250 @ 03/14/23 11:55:28.328 STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:55:28.356 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:55:28.362 < Exit [BeforeEach] [sig-network] Services - set up framework | framework.go:191 @ 03/14/23 11:55:28.367 (41ms) > Enter [BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:55:28.367 < Exit [BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:55:28.367 (0s) > Enter [BeforeEach] [sig-network] Services - test/e2e/network/service.go:764 @ 03/14/23 11:55:28.367 < Exit [BeforeEach] [sig-network] Services - test/e2e/network/service.go:764 @ 03/14/23 11:55:28.368 (0s) > Enter [It] should serve a basic endpoint from pods [Conformance] - test/e2e/network/service.go:785 @ 03/14/23 11:55:28.368 STEP: creating service endpoint-test2 in namespace services-8400 - test/e2e/network/service.go:790 @ 03/14/23 11:55:28.368 STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-8400 to expose endpoints map[] - test/e2e/network/service.go:4193 @ 03/14/23 11:55:28.375 Mar 14 11:55:28.390: INFO: successfully validated that service endpoint-test2 in namespace services-8400 exposes endpoints map[] STEP: Creating pod pod1 in namespace services-8400 - test/e2e/network/service.go:4094 @ 03/14/23 11:55:28.39 STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-8400 to expose endpoints map[pod1:[80]] - test/e2e/network/service.go:4193 @ 03/14/23 11:55:46.449 Mar 14 11:55:46.459: INFO: successfully validated that service endpoint-test2 in namespace services-8400 exposes endpoints map[pod1:[80]] STEP: Checking if the Service forwards traffic to pod1 - test/e2e/network/service.go:815 @ 03/14/23 11:55:46.46 Mar 14 11:55:46.460: INFO: Creating new exec pod Mar 14 11:56:08.480: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:56:08.583: INFO: rc: 1 Mar 14 11:56:08.583: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:56:09.584 – 11:57:24.690: INFO: The framework re-ran the same probe ('/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80') roughly once per second, more than seventy times. Every attempt but two failed with rc: 1 and the same stderr: error: unable to upgrade connection: container not found ("agnhost-container"). The two exceptions, at 11:56:28.584 and 11:57:12.584, did reach the container but the probe itself timed out, with stderr + echo hostName + nc -v -t -w 2 endpoint-test2 80 command terminated with exit code 137 and rc: 137. After each failure the framework logged Retrying...
Mar 14 11:57:25.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:25.702: INFO: rc: 1 Mar 14 11:57:25.702: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:26.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:26.700: INFO: rc: 1 Mar 14 11:57:26.700: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:27.585: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:27.710: INFO: rc: 1 Mar 14 11:57:27.710: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:28.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:28.704: INFO: rc: 1 Mar 14 11:57:28.704: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:57:29.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:29.692: INFO: rc: 1 Mar 14 11:57:29.692: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:30.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:30.693: INFO: rc: 1 Mar 14 11:57:30.693: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:31.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:31.718: INFO: rc: 1 Mar 14 11:57:31.718: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:32.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:32.694: INFO: rc: 1 Mar 14 11:57:32.694: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:57:33.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:33.690: INFO: rc: 1 Mar 14 11:57:33.690: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:34.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:34.685: INFO: rc: 1 Mar 14 11:57:34.685: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:35.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:35.686: INFO: rc: 1 Mar 14 11:57:35.686: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:36.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:36.711: INFO: rc: 1 Mar 14 11:57:36.711: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:57:37.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:37.701: INFO: rc: 1 Mar 14 11:57:37.701: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:38.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:38.689: INFO: rc: 1 Mar 14 11:57:38.689: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:39.585: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:39.686: INFO: rc: 1 Mar 14 11:57:39.686: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:40.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:40.689: INFO: rc: 1 Mar 14 11:57:40.689: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:57:41.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:41.685: INFO: rc: 1 Mar 14 11:57:41.685: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:42.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:42.692: INFO: rc: 1 Mar 14 11:57:42.692: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:43.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:43.689: INFO: rc: 1 Mar 14 11:57:43.689: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:44.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:44.691: INFO: rc: 1 Mar 14 11:57:44.691: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:57:45.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:45.685: INFO: rc: 1 Mar 14 11:57:45.685: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:46.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:46.705: INFO: rc: 1 Mar 14 11:57:46.705: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:47.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:47.704: INFO: rc: 1 Mar 14 11:57:47.704: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:48.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:48.690: INFO: rc: 1 Mar 14 11:57:48.691: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:57:49.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:49.687: INFO: rc: 1 Mar 14 11:57:49.687: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:50.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:50.689: INFO: rc: 1 Mar 14 11:57:50.689: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:51.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:51.684: INFO: rc: 1 Mar 14 11:57:51.684: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:52.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:52.688: INFO: rc: 1 Mar 14 11:57:52.688: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:57:53.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:53.685: INFO: rc: 1 Mar 14 11:57:53.685: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:54.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:54.688: INFO: rc: 1 Mar 14 11:57:54.688: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:55.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:55.689: INFO: rc: 1 Mar 14 11:57:55.689: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:56.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:56.690: INFO: rc: 1 Mar 14 11:57:56.690: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:57:57.585: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:57.699: INFO: rc: 1 Mar 14 11:57:57.699: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:58.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:58.689: INFO: rc: 1 Mar 14 11:57:58.689: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:57:59.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:57:59.689: INFO: rc: 1 Mar 14 11:57:59.689: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:58:00.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:58:00.684: INFO: rc: 1 Mar 14 11:58:00.684: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:58:01.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:58:01.687: INFO: rc: 1 Mar 14 11:58:01.687: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:58:02.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:58:02.691: INFO: rc: 1 Mar 14 11:58:02.691: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:58:03.585: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:58:03.684: INFO: rc: 1 Mar 14 11:58:03.684: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:58:04.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:58:04.685: INFO: rc: 1 Mar 14 11:58:04.685: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
Mar 14 11:58:05.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:58:05.693: INFO: rc: 1 Mar 14 11:58:05.693: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:58:06.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:58:06.689: INFO: rc: 1 Mar 14 11:58:06.689: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:58:07.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:58:07.695: INFO: rc: 1 Mar 14 11:58:07.695: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... Mar 14 11:58:08.584: INFO: Running '/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' Mar 14 11:58:08.693: INFO: rc: 1 Mar 14 11:58:08.693: INFO: Service reachability failing with error: error running /go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/workspace/.kube/config --namespace=services-8400 exec execpodwb9t8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80: Command stdout: stderr: error: unable to upgrade connection: container not found ("agnhost-container") error: exit status 1 Retrying... 
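Each retry above is the framework's TCP reachability probe: it execs into the client pod execpodwb9t8 and pipes one line through nc at the endpoint-test2 service on port 80. A minimal sketch of running the same probe by hand, assuming a kubeconfig for this cluster and that the namespace and pod from this run still exist:

# Same probe the e2e framework loops on (names taken from this run).
kubectl --namespace=services-8400 exec execpodwb9t8 -- \
  /bin/sh -x -c 'echo hostName | nc -v -t -w 2 endpoint-test2 80'
# Exit codes seen in this log:
#   1   - kubectl could not attach: container not found ("agnhost-container"),
#         i.e. the exec pod's container was not running at that moment
#   137 - the shell started but was killed (137 = 128 + SIGKILL) before the
#         2-second nc connect timeout resolved, as at 11:57:13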
Mar 14 11:58:08.798: INFO: Unexpected error:
<*errors.errorString | 0xc000ed6f60>: {
s: "service is not reachable within 2m0s timeout on endpoint endpoint-test2:80 over TCP protocol",
}
[FAILED] service is not reachable within 2m0s timeout on endpoint endpoint-test2:80 over TCP protocol
In [It] at: test/e2e/network/service.go:818 @ 03/14/23 11:58:08.798
< Exit [It] should serve a basic endpoint from pods [Conformance] - test/e2e/network/service.go:785 @ 03/14/23 11:58:08.798 (2m40.431s)
> Enter [AfterEach] [sig-network] Services - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:58:08.798
Mar 14 11:58:08.799: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
< Exit [AfterEach] [sig-network] Services - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:58:08.812 (13ms)
> Enter [DeferCleanup (Each)] [sig-network] Services - test/e2e/network/service.go:801 @ 03/14/23 11:58:08.812
< Exit [DeferCleanup (Each)] [sig-network] Services - test/e2e/network/service.go:801 @ 03/14/23 11:58:08.825 (14ms)
> Enter [DeferCleanup (Each)] [sig-network] Services - test/e2e/network/service.go:791 @ 03/14/23 11:58:08.826
< Exit [DeferCleanup (Each)] [sig-network] Services - test/e2e/network/service.go:791 @ 03/14/23 11:58:08.839 (14ms)
> Enter [DeferCleanup (Each)] [sig-network] Services - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:58:08.839
< Exit [DeferCleanup (Each)] [sig-network] Services - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:58:08.839 (0s)
> Enter [DeferCleanup (Each)] [sig-network] Services - dump namespaces | framework.go:209 @ 03/14/23 11:58:08.839
STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:58:08.839
STEP: Collecting events from namespace "services-8400". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:58:08.839
STEP: Found 13 events.
- test/e2e/framework/debug/dump.go:46 @ 03/14/23 11:58:08.843
Mar 14 11:58:08.843: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for execpodwb9t8: { } Scheduled: Successfully assigned services-8400/execpodwb9t8 to 172.17.0.1
Mar 14 11:58:08.843: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for pod1: { } Scheduled: Successfully assigned services-8400/pod1 to 172.17.0.1
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:30 +0000 UTC - event for pod1: {kubelet 172.17.0.1} Started: Started container agnhost-container
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:30 +0000 UTC - event for pod1: {kubelet 172.17.0.1} Created: Created container agnhost-container
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:30 +0000 UTC - event for pod1: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:31 +0000 UTC - event for pod1: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:37 +0000 UTC - event for pod1: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container agnhost-container in pod pod1_services-8400(3d3ec961-733d-4580-b9b1-689eca7fe832)
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:49 +0000 UTC - event for execpodwb9t8: {kubelet 172.17.0.1} Failed: Error: failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: can't get final child's PID from pipe: EOF: unknown
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:49 +0000 UTC - event for execpodwb9t8: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:49 +0000 UTC - event for execpodwb9t8: {kubelet 172.17.0.1} Created: Created container agnhost-container
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:49 +0000 UTC - event for execpodwb9t8: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:51 +0000 UTC - event for execpodwb9t8: {kubelet 172.17.0.1} Started: Started container agnhost-container
Mar 14 11:58:08.843: INFO: At 2023-03-14 11:55:55 +0000 UTC - event for execpodwb9t8: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container agnhost-container in pod execpodwb9t8_services-8400(03b7e50f-608f-4ab3-8942-8a646e32ecdd)
Mar 14 11:58:08.847: INFO: POD           NODE        PHASE    GRACE  CONDITIONS
Mar 14 11:58:08.848: INFO: execpodwb9t8  172.17.0.1  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:55:46 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:57:13 +0000 UTC ContainersNotReady containers with unready status: [agnhost-container]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:57:13 +0000 UTC ContainersNotReady containers with unready status: [agnhost-container]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:55:46 +0000 UTC  }]
Mar 14 11:58:08.848: INFO:
Mar 14 11:58:08.868: INFO: Logging node info for node 172.17.0.1
Mar 14 11:58:08.872: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 8930 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0
volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:56:16 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:56:16 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}
Mar 14 11:58:08.872: INFO: Logging kubelet events for node 172.17.0.1
Mar 14 11:58:08.875: INFO: Logging pods the kubelet thinks is on node 172.17.0.1
Mar 14 11:58:08.884: INFO: execpodwb9t8 started at 2023-03-14 11:55:46 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:58:08.884: INFO: Container agnhost-container ready: false, restart count 4
Mar 14 11:58:08.884: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:58:08.884: INFO: Container coredns ready: false, restart count 14
Mar 14 11:58:08.922: INFO: Latency metrics for node 172.17.0.1
END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:58:08.922 (82ms)
< Exit [DeferCleanup (Each)] [sig-network] Services - dump namespaces | framework.go:209 @ 03/14/23 11:58:08.922 (82ms)
> Enter [DeferCleanup (Each)] [sig-network] Services - tear down framework | framework.go:206 @ 03/14/23 11:58:08.922
STEP: Destroying namespace "services-8400" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:58:08.922
< Exit [DeferCleanup (Each)] [sig-network] Services - tear down framework | framework.go:206 @ 03/14/23 11:58:08.928 (6ms)
> Enter [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:58:08.928
< Exit [ReportAfterEach] TOP-LEVEL - test/e2e/e2e_test.go:144 @ 03/14/23 11:58:08.928 (0s)
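The events and pod statuses above point at the node's container runtime rather than at the service under test: execpodwb9t8's container died with a runc create error ("can't get final child's PID from pipe: EOF"), both test pods went into back-off, and even coredns on the node shows 14 restarts. A hedged triage sketch for this pattern, using only standard kubectl, crictl, and journalctl invocations (the crictl and journalctl steps assume shell access to node 172.17.0.1, which this CI environment may not provide):

# Restart history and last crash output of the exec pod
kubectl --namespace=services-8400 describe pod execpodwb9t8
kubectl --namespace=services-8400 logs execpodwb9t8 --previous
# Is the runtime failing for unrelated pods too? (here it is: coredns restarts)
kubectl get events --all-namespaces --field-selector reason=Failed
# On the node itself, inspect containerd's view of containers and its logs
crictl ps -a
journalctl -u containerd --since "2023-03-14 11:55"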
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-node\]\sContainer\sLifecycle\sHook\swhen\screate\sa\spod\swith\slifecycle\shook\sshould\sexecute\spoststart\shttp\shook\sproperly\s\[NodeConformance\]\s\[Conformance\]$'
[FAILED] Timed out after 300.001s. expected pod to be running and ready, got instead: <*v1.Pod | 0xc002c89680>: metadata: creationTimestamp: "2023-03-14T11:30:49Z" managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:spec: f:affinity: .: {} f:nodeAffinity: .: {} f:requiredDuringSchedulingIgnoredDuringExecution: {} f:containers: k:{"name":"pod-with-poststart-http-hook"}: .: {} f:image: {} f:imagePullPolicy: {} f:lifecycle: .: {} f:postStart: .: {} f:httpGet: .: {} f:host: {} f:path: {} f:port: {} f:scheme: {} f:name: {} f:resources: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:dnsPolicy: {} f:enableServiceLinks: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:terminationGracePeriodSeconds: {} manager: e2e.test operation: Update time: "2023-03-14T11:30:49Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:status: f:conditions: k:{"type":"ContainersReady"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:message: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Initialized"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:status: {} f:type: {} k:{"type":"Ready"}: .: {} f:lastProbeTime: {} f:lastTransitionTime: {} f:message: {} f:reason: {} f:status: {} f:type: {} f:containerStatuses: {} f:hostIP: {} f:phase: {} f:podIP: {} f:podIPs: .: {} k:{"ip":"10.88.3.199"}: .: {} f:ip: {} k:{"ip":"2001:4860:4860::3c7"}: .: {} f:ip: {} f:startTime: {} manager: kubelet operation: Update subresource: status time: "2023-03-14T11:35:44Z" name: pod-with-poststart-http-hook namespace: container-lifecycle-hook-107 resourceVersion: "4320" uid: bad2f639-9892-46d5-9548-fafdbcf82ad2 spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchFields: - key: metadata.name operator: In values: - 172.17.0.1 containers: - image: registry.k8s.io/pause:3.9 imagePullPolicy: IfNotPresent lifecycle: postStart: httpGet: host: 10.88.2.237 path: /echo?msg=poststart port: 8080 scheme: HTTP name: pod-with-poststart-http-hook resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-vtwrm readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: 172.17.0.1 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: kube-api-access-vtwrm projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace status: conditions: - lastProbeTime: null lastTransitionTime: "2023-03-14T11:30:49Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2023-03-14T11:30:49Z" message: 'containers with unready status: [pod-with-poststart-http-hook]' reason: ContainersNotReady status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2023-03-14T11:30:49Z" message: 'containers with unready status: [pod-with-poststart-http-hook]' reason: ContainersNotReady status: "False" type: ContainersReady - lastProbeTime: null 
lastTransitionTime: "2023-03-14T11:30:49Z" status: "True" type: PodScheduled containerStatuses: - containerID: containerd://08e15c39973243128d80341603c678ab0eca9212ad99b3078b31e01054cb2345 image: registry.k8s.io/pause:3.9 imageID: registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 lastState: terminated: containerID: containerd://08e15c39973243128d80341603c678ab0eca9212ad99b3078b31e01054cb2345 exitCode: 137 finishedAt: "2023-03-14T11:33:43Z" reason: Error startedAt: "2023-03-14T11:33:41Z" name: pod-with-poststart-http-hook ready: false restartCount: 5 started: false state: waiting: message: back-off 2m40s restarting failed container=pod-with-poststart-http-hook pod=pod-with-poststart-http-hook_container-lifecycle-hook-107(bad2f639-9892-46d5-9548-fafdbcf82ad2) reason: CrashLoopBackOff hostIP: 172.17.0.1 phase: Running podIP: 10.88.3.199 podIPs: - ip: 10.88.3.199 - ip: 2001:4860:4860::3c7 qosClass: BestEffort startTime: "2023-03-14T11:30:49Z" In [It] at: test/e2e/framework/pod/pod_client.go:106 @ 03/14/23 11:35:49.494
> Enter [BeforeEach] [sig-node] Container Lifecycle Hook - set up framework | framework.go:191 @ 03/14/23 11:30:45.429
STEP: Creating a kubernetes client - test/e2e/framework/framework.go:211 @ 03/14/23 11:30:45.429
Mar 14 11:30:45.429: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename container-lifecycle-hook - test/e2e/framework/framework.go:250 @ 03/14/23 11:30:45.431
STEP: Waiting for a default service account to be provisioned in namespace - test/e2e/framework/framework.go:259 @ 03/14/23 11:30:45.446
STEP: Waiting for kube-root-ca.crt to be provisioned in namespace - test/e2e/framework/framework.go:262 @ 03/14/23 11:30:45.451
< Exit [BeforeEach] [sig-node] Container Lifecycle Hook - set up framework | framework.go:191 @ 03/14/23 11:30:45.456 (27ms)
> Enter [BeforeEach] [sig-node] Container Lifecycle Hook - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:30:45.456
< Exit [BeforeEach] [sig-node] Container Lifecycle Hook - test/e2e/framework/metrics/init/init.go:33 @ 03/14/23 11:30:45.456 (0s)
> Enter [BeforeEach] when create a pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:78 @ 03/14/23 11:30:45.456
STEP: create the container to handle the HTTPGet hook request. - test/e2e/common/node/lifecycle_hook.go:87 @ 03/14/23 11:30:45.46
< Exit [BeforeEach] when create a pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:78 @ 03/14/23 11:30:49.485 (4.029s)
> Enter [It] should execute poststart http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:168 @ 03/14/23 11:30:49.485
STEP: create the pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:96 @ 03/14/23 11:30:49.485
Automatically polling progress: [sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance] (Spec Runtime: 5m4.056s)
test/e2e/common/node/lifecycle_hook.go:168
In [It] (Node Runtime: 5m0s)
test/e2e/common/node/lifecycle_hook.go:168
At [By Step] create the pod with lifecycle hook (Step Runtime: 5m0s)
test/e2e/common/node/lifecycle_hook.go:96
Spec Goroutine
goroutine 2103 [select]
k8s.io/kubernetes/vendor/github.com/onsi/gomega/internal.(*AsyncAssertion).match(0xc0003080e0, {0x71df290?, 0xc000a72370}, 0x1, {0x0, 0x0, 0x0})
vendor/github.com/onsi/gomega/internal/async_assertion.go:530
k8s.io/kubernetes/vendor/github.com/onsi/gomega/internal.(*AsyncAssertion).Should(0xc0003080e0, {0x71df290, 0xc000a72370}, {0x0, 0x0, 0x0})
vendor/github.com/onsi/gomega/internal/async_assertion.go:145
k8s.io/kubernetes/test/e2e/framework.asyncAssertion.Should({{0x7f164c6bba20, 0xc003d4d9c0}, {0xc000a72310, 0x1, 0x1}, 0x45d964b800, 0x77359400, 0x0}, {0x71df290, 0xc000a72370})
test/e2e/framework/expect.go:234
k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x7f164c6bba20, 0xc003d4d9c0}, {0x7233598?, 0xc00197f040?}, {0xc003c5ce40, 0x1c}, {0xc000142c40, 0x1c}, {0x6a57f07, 0x11}, ...)
test/e2e/framework/pod/wait.go:228
k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x7f164c6bba20?, 0xc003d4d9c0?}, {0x7233598?, 0xc00197f040?}, {0xc000142c40?, 0x0?}, {0xc003c5ce40?, 0x26bf1e5?}, 0x0?)
test/e2e/framework/pod/wait.go:492
k8s.io/kubernetes/test/e2e/framework/pod.(*PodClient).CreateSync(0xc000c74540, {0x7f164c6bba20, 0xc003d4d9c0}, 0x30?)
      test/e2e/framework/pod/pod_client.go:106
    > k8s.io/kubernetes/test/e2e/common/node.glob..func12.1.2({0x7f164c6bba20?, 0xc003d4d9c0}, 0xc0021c2d80)
      test/e2e/common/node/lifecycle_hook.go:97
        | testPodWithHook := func(ctx context.Context, podWithHook *v1.Pod) {
        | 	ginkgo.By("create the pod with lifecycle hook")
        > 	podClient.CreateSync(ctx, podWithHook)
        | 	const (
        | 		defaultHandler = iota
    > k8s.io/kubernetes/test/e2e/common/node.glob..func12.1.5({0x7f164c6bba20, 0xc003d4d9c0})
      test/e2e/common/node/lifecycle_hook.go:183
        | 	e2epod.SetAffinity(&nodeSelection, targetNode)
        | 	e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
        > 	testPodWithHook(ctx, podWithHook)
        | })
        | /*
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x71ff2c0?, 0xc003d4d9c0})
      vendor/github.com/onsi/ginkgo/v2/internal/node.go:452
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
      vendor/github.com/onsi/ginkgo/v2/internal/suite.go:854
    k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
      vendor/github.com/onsi/ginkgo/v2/internal/suite.go:841
  Begin Additional Progress Reports >>
    expected pod to be running and ready, got instead:
        <*v1.Pod | 0xc002c89680>:
        [pod dump elided; identical to the failure summary earlier in this report]
  << End Additional Progress Reports
Mar 14 11:35:49.493: INFO: Failed inside E2E framework:
    k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x7f164c6bba20, 0xc003d4d9c0}, {0x7233598?, 0xc00197f040?}, {0xc003c5ce40, 0x1c}, {0xc000142c40, 0x1c}, {0x6a57f07, 0x11}, ...)
        test/e2e/framework/pod/wait.go:228 +0x25f
    k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x7f164c6bba20?, 0xc003d4d9c0?}, {0x7233598?, 0xc00197f040?}, {0xc000142c40?, 0x0?}, {0xc003c5ce40?, 0x26bf1e5?}, 0x0?)
        test/e2e/framework/pod/wait.go:492 +0x75
    k8s.io/kubernetes/test/e2e/framework/pod.(*PodClient).CreateSync(0xc000c74540, {0x7f164c6bba20, 0xc003d4d9c0}, 0x30?)
        test/e2e/framework/pod/pod_client.go:106 +0x97
    k8s.io/kubernetes/test/e2e/common/node.glob..func12.1.2({0x7f164c6bba20?, 0xc003d4d9c0}, 0xc0021c2d80)
        test/e2e/common/node/lifecycle_hook.go:97 +0x9e
    k8s.io/kubernetes/test/e2e/common/node.glob..func12.1.5({0x7f164c6bba20, 0xc003d4d9c0})
        test/e2e/common/node/lifecycle_hook.go:183 +0x647
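The wait that failed here is CreateSync -> WaitTimeoutForPodReadyInNamespace -> WaitForPodCondition per the stack above, a five-minute Ready poll. A stripped-down sketch of that polling pattern using apimachinery's wait helpers follows; it is not the framework's actual implementation, and the 2s interval and kubeconfig wiring are assumptions (namespace and pod name are taken from this run):

// Sketch of the 5-minute Ready poll that timed out in this run. Illustrative,
// not the e2e framework's code; interval and wiring are assumptions.
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func waitForPodReady(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			for _, c := range pod.Status.Conditions {
				if c.Type == corev1.PodReady {
					// A pod stuck in CrashLoopBackOff never reports Ready=True,
					// so this loop runs until the timeout fires, as seen above.
					return c.Status == corev1.ConditionTrue, nil
				}
			}
			return false, nil
		})
}

func main() {
	// Kubeconfig path as logged earlier in this run.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	err = waitForPodReady(context.TODO(), cs, "container-lifecycle-hook-107", "pod-with-poststart-http-hook")
	fmt.Println("ready wait result:", err)
}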
[FAILED] Timed out after 300.001s.
expected pod to be running and ready, got instead:
    <*v1.Pod | 0xc002c89680>:
    [pod dump elided; identical to the failure summary earlier in this report]
In [It] at: test/e2e/framework/pod/pod_client.go:106 @ 03/14/23 11:35:49.494
< Exit [It] should execute poststart http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:168 @ 03/14/23 11:35:49.494 (5m0.008s)
> Enter [AfterEach] [sig-node] Container Lifecycle Hook - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:35:49.494
Mar 14 11:35:49.494: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
< Exit [AfterEach] [sig-node] Container Lifecycle Hook - test/e2e/framework/node/init/init.go:33 @ 03/14/23 11:35:49.498 (4ms)
> Enter [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:35:49.498
< Exit [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook - test/e2e/framework/metrics/init/init.go:35 @ 03/14/23 11:35:49.498 (0s)
> Enter [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook - dump namespaces | framework.go:209 @ 03/14/23 11:35:49.498
STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:35:49.498
STEP: Collecting events from namespace "container-lifecycle-hook-107". - test/e2e/framework/debug/dump.go:42 @ 03/14/23 11:35:49.498
STEP: Found 18 events.
- test/e2e/framework/debug/dump.go:46 @ 03/14/23 11:35:49.502
Mar 14 11:35:49.502: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for pod-handle-http-request: { } Scheduled: Successfully assigned container-lifecycle-hook-107/pod-handle-http-request to 172.17.0.1
Mar 14 11:35:49.502: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for pod-with-poststart-http-hook: { } Scheduled: Successfully assigned container-lifecycle-hook-107/pod-with-poststart-http-hook to 172.17.0.1
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:48 +0000 UTC - event for pod-handle-http-request: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:48 +0000 UTC - event for pod-handle-http-request: {kubelet 172.17.0.1} Created: Created container container-handle-http-request
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:48 +0000 UTC - event for pod-handle-http-request: {kubelet 172.17.0.1} Started: Started container container-handle-http-request
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:48 +0000 UTC - event for pod-handle-http-request: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:48 +0000 UTC - event for pod-handle-http-request: {kubelet 172.17.0.1} Created: Created container container-handle-https-request
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:48 +0000 UTC - event for pod-handle-http-request: {kubelet 172.17.0.1} Started: Started container container-handle-https-request
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:49 +0000 UTC - event for pod-handle-http-request: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:51 +0000 UTC - event for pod-with-poststart-http-hook: {kubelet 172.17.0.1} Created: Created container pod-with-poststart-http-hook
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:51 +0000 UTC - event for pod-with-poststart-http-hook: {kubelet 172.17.0.1} Pulled: Container image "registry.k8s.io/pause:3.9" already present on machine
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:51 +0000 UTC - event for pod-with-poststart-http-hook: {kubelet 172.17.0.1} Started: Started container pod-with-poststart-http-hook
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:54 +0000 UTC - event for pod-with-poststart-http-hook: {kubelet 172.17.0.1} FailedPostStartHook: PostStartHook failed
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:54 +0000 UTC - event for pod-with-poststart-http-hook: {kubelet 172.17.0.1} Killing: FailedPostStartHook
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:55 +0000 UTC - event for pod-handle-http-request: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container container-handle-https-request in pod pod-handle-http-request_container-lifecycle-hook-107(9371b94e-f7d7-4fa9-9b42-7113a12f800f)
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:55 +0000 UTC - event for pod-handle-http-request: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container container-handle-http-request in pod pod-handle-http-request_container-lifecycle-hook-107(9371b94e-f7d7-4fa9-9b42-7113a12f800f)
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:30:55 +0000 UTC - event for pod-with-poststart-http-hook: {kubelet 172.17.0.1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Mar 14 11:35:49.502: INFO: At 2023-03-14 11:31:03 +0000 UTC - event for pod-with-poststart-http-hook: {kubelet 172.17.0.1} BackOff: Back-off restarting failed container pod-with-poststart-http-hook in pod pod-with-poststart-http-hook_container-lifecycle-hook-107(bad2f639-9892-46d5-9548-fafdbcf82ad2)
Mar 14 11:35:49.506: INFO: POD  NODE  PHASE  GRACE  CONDITIONS
Mar 14 11:35:49.506: INFO: pod-handle-http-request  172.17.0.1  Running  [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:30:45 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:33:37 +0000 UTC ContainersNotReady containers with unready status: [container-handle-http-request container-handle-https-request]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:33:37 +0000 UTC ContainersNotReady containers with unready status: [container-handle-http-request container-handle-https-request]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:30:45 +0000 UTC }]
Mar 14 11:35:49.506: INFO: pod-with-poststart-http-hook  172.17.0.1  Running  [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:30:49 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:30:49 +0000 UTC ContainersNotReady containers with unready status: [pod-with-poststart-http-hook]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:30:49 +0000 UTC ContainersNotReady containers with unready status: [pod-with-poststart-http-hook]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-03-14 11:30:49 +0000 UTC }]
Mar 14 11:35:49.506: INFO:
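The events above show the kubelet's FailedPostStartHook -> Killing -> BackOff cycle that kept the pod from ever becoming Ready. Outside the e2e framework, roughly the same event lookup with client-go could look like the sketch below; it assumes the kubeconfig path this run logged, and the namespace is taken from this report:

// Sketch: list FailedPostStartHook events from the test namespace, roughly
// what the namespace dump in this report collected.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	// Events support field selectors; filter on the reason seen above.
	evs, err := cs.CoreV1().Events("container-lifecycle-hook-107").List(context.TODO(),
		metav1.ListOptions{FieldSelector: "reason=FailedPostStartHook"})
	if err != nil {
		panic(err)
	}
	for _, e := range evs.Items {
		fmt.Printf("%s %s: %s\n", e.LastTimestamp, e.InvolvedObject.Name, e.Message)
	}
}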
Mar 14 11:35:49.536: INFO: Logging node info for node 172.17.0.1
Mar 14 11:35:49.538: INFO: Node Info: &Node{ObjectMeta:{172.17.0.1 c5a75893-d9b4-41b0-99a6-ba65a78e9388 4284 0 2023-03-14 11:03:36 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:172.17.0.1 kubernetes.io/os:linux] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}}} } {kubelet Update v1 2023-03-14 11:03:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {kubelet Update v1 2023-03-14 11:35:22 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{259962224640 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67441377280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{8 0} {<nil>} 8 DecimalSI},ephemeral-storage: {{233966001789 0} {<nil>} 233966001789 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{67336519680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-03-14 11:35:22 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-03-14 11:35:22 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-03-14 11:35:22 +0000 UTC,LastTransitionTime:2023-03-14 11:03:36 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-03-14 11:35:22 +0000 UTC,LastTransitionTime:2023-03-14 11:03:37 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.1,},NodeAddress{Type:Hostname,Address:172.17.0.1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:3aa74438-b971-78ad-e1b0-78b122c2f143,BootID:05bd4557-1882-407e-8ba6-38c6ad10053c,KernelVersion:5.4.0-1086-gke,OSImage:Debian GNU/Linux 11 (bullseye),ContainerRuntimeVersion:containerd://1.6.16,KubeletVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,KubeProxyVersion:v1.27.0-alpha.3.554+bdf18032e96dd3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}
Mar 14 11:35:49.538: INFO: Logging kubelet events for node 172.17.0.1
Mar 14 11:35:49.541: INFO: Logging pods the kubelet thinks is on node 172.17.0.1
Mar 14 11:35:49.547: INFO: coredns-55fddfc79-bnh6f started at 2023-03-14 11:03:39 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:35:49.547: INFO: Container coredns ready: false, restart count 10
Mar 14 11:35:49.547: INFO: pod-handle-http-request started at 2023-03-14 11:30:45 +0000 UTC (0+2 container statuses recorded)
Mar 14 11:35:49.547: INFO: Container container-handle-http-request ready: false, restart count 5
Mar 14 11:35:49.547: INFO: Container container-handle-https-request ready: false, restart count 5
Mar 14 11:35:49.547: INFO: pod-with-poststart-http-hook started at 2023-03-14 11:30:49 +0000 UTC (0+1 container statuses recorded)
Mar 14 11:35:49.547: INFO: Container pod-with-poststart-http-hook ready: false, restart count 5
Mar 14 11:35:49.583: INFO: Latency metrics for node 172.17.0.1
END STEP: dump namespace information after failure - test/e2e/framework/framework.go:288 @ 03/14/23 11:35:49.583 (85ms)
< Exit [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook - dump namespaces | framework.go:209 @ 03/14/23 11:35:49.583 (85ms)
> Enter [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook - tear down framework | framework.go:206 @ 03/14/23 11:35:49.583
STEP: Destroying namespace "container-lifecycle-hook-107" for this suite. - test/e2e/framework/framework.go:351 @ 03/14/23 11:35:49.583
< Exit [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook - tear down framework | framework.go:206 @ 03/14/23 11:35:49.589 (6ms)
> Enter [R