Result | FAILURE |
Tests | 24 failed / 861 succeeded |
Started | |
Elapsed | 43m56s |
Revision | master |
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sDeployment\sRecreateDeployment\sshould\sdelete\sold\spods\sand\screate\snew\sones\s\[Conformance\]$'
test/e2e/apps/deployment.go:800 k8s.io/kubernetes/test/e2e/apps.testRecreateDeployment(0xc000e41cd8?) test/e2e/apps/deployment.go:800 +0x5c5 k8s.io/kubernetes/test/e2e/apps.glob..func5.5() test/e2e/apps/deployment.go:114 +0x1d from junit_01.xml
[BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 STEP: Creating a kubernetes client 01/14/23 06:33:36.896 Jan 14 06:33:36.896: INFO: >>> kubeConfig: /root/.kube/config STEP: Building a namespace api object, basename deployment 01/14/23 06:33:36.898 STEP: Waiting for a default service account to be provisioned in namespace 01/14/23 06:33:37.212 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 01/14/23 06:33:37.419 [BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-apps] Deployment test/e2e/apps/deployment.go:91 [It] RecreateDeployment should delete old pods and create new ones [Conformance] test/e2e/apps/deployment.go:113 Jan 14 06:33:37.627: INFO: Creating deployment "test-recreate-deployment" Jan 14 06:33:37.734: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1 Jan 14 06:33:37.943: INFO: Waiting deployment "test-recreate-deployment" to complete Jan 14 06:33:38.047: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:40.153: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:42.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:44.154: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", 
LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:46.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:48.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum 
availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:50.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:52.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:54.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:56.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:33:58.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, 
UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:00.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:02.153: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:04.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:06.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", 
Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:08.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:10.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet 
\"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:12.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:14.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:16.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, 
AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:18.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:20.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:22.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:24.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", 
LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:26.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:28.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" 
is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:30.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:32.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:34.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:36.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:38.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:40.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:42.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:44.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:46.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:48.152: INFO: 
deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:50.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:52.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", 
LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:54.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:56.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum 
availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:34:58.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:00.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:02.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:04.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:06.153: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, 
UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:08.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:10.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:12.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:14.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", 
Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:16.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:18.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet 
\"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:20.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:22.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:24.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, 
AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:26.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:28.153: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:30.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:32.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", 
LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:34.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:36.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" 
is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:38.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:40.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:42.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:44.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:46.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:48.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:50.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:52.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:54.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:56.152: INFO: 
deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:35:58.153: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:00.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", 
LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:02.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:04.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum 
availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:06.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:08.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:10.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:12.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:14.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, 
UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:16.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:18.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:20.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:22.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", 
Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:24.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:26.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet 
\"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:28.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:30.154: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:32.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, 
AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:34.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:36.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:38.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:40.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", 
LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:42.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:44.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" 
is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:46.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:48.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:50.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:52.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:54.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:56.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:36:58.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:00.153: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:02.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:04.152: INFO: 
deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:06.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:08.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", 
LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:10.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:12.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum 
availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:14.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:16.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:18.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:20.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:22.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, 
UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:24.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:26.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:28.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:30.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", 
Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:32.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:34.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet 
\"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:36.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:38.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:40.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, 
AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:42.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:44.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:46.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:48.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", 
LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:50.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:52.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" 
is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:54.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:56.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:37:58.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:00.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:02.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:04.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:06.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:08.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:10.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:12.151: INFO: 
deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:14.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:16.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", 
LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:18.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:20.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum 
availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:22.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:24.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), 
Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:26.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:28.151: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:30.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, 
UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:32.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:34.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:36.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:38.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", 
Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:38.256: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Jan 14 06:38:38.256: INFO: Unexpected error: <*errors.errorString | 0xc001612620>: { s: "error waiting for deployment \"test-recreate-deployment\" status to match expectation: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:\"Available\", Status:\"False\", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:\"MinimumReplicasUnavailable\", Message:\"Deployment does not have minimum availability.\"}, v1.DeploymentCondition{Type:\"Progressing\", Status:\"True\", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, 
time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:\"ReplicaSetUpdated\", Message:\"ReplicaSet \\\"test-recreate-deployment-795566c5cb\\\" is progressing.\"}}, CollisionCount:(*int32)(nil)}", } Jan 14 06:38:38.256: FAIL: error waiting for deployment "test-recreate-deployment" status to match expectation: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), LastTransitionTime:time.Date(2023, time.January, 14, 6, 33, 37, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-795566c5cb\" is progressing."}}, CollisionCount:(*int32)(nil)} Full Stack Trace k8s.io/kubernetes/test/e2e/apps.testRecreateDeployment(0xc000e41cd8?) 
test/e2e/apps/deployment.go:800 +0x5c5 k8s.io/kubernetes/test/e2e/apps.glob..func5.5() test/e2e/apps/deployment.go:114 +0x1d [AfterEach] [sig-apps] Deployment test/e2e/apps/deployment.go:84 Jan 14 06:38:38.360: INFO: Deployment "test-recreate-deployment": &Deployment{ObjectMeta:{test-recreate-deployment deployment-6041 363af813-1c24-43be-96d7-3606be2ec6c9 12882 1 2023-01-14 06:33:37 +0000 UTC <nil> <nil> map[name:sample-pod-3] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 2023-01-14 06:33:37 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-01-14 06:33:37 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[name:sample-pod-3] map[] [] [] []} {[] [] [{agnhost 
registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004072488 <nil> ClusterFirst map[] <nil> false false false <nil> &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil> nil <nil> [] []}},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2023-01-14 06:33:37 +0000 UTC,LastTransitionTime:2023-01-14 06:33:37 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "test-recreate-deployment-795566c5cb" is progressing.,LastUpdateTime:2023-01-14 06:33:37 +0000 UTC,LastTransitionTime:2023-01-14 06:33:37 +0000 UTC,},},ReadyReplicas:0,CollisionCount:nil,},} Jan 14 06:38:38.465: INFO: New ReplicaSet "test-recreate-deployment-795566c5cb" of Deployment "test-recreate-deployment": &ReplicaSet{ObjectMeta:{test-recreate-deployment-795566c5cb deployment-6041 0370bd75-2432-4eab-8a8c-ebfc9bb4d475 12881 1 2023-01-14 06:33:37 +0000 UTC <nil> <nil> map[name:sample-pod-3 pod-template-hash:795566c5cb] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 
deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-recreate-deployment 363af813-1c24-43be-96d7-3606be2ec6c9 0xc004072827 0xc004072828}] [] [{kube-controller-manager Update apps/v1 2023-01-14 06:33:37 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"363af813-1c24-43be-96d7-3606be2ec6c9\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-01-14 06:33:37 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 795566c5cb,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[name:sample-pod-3 pod-template-hash:795566c5cb] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0040728d8 <nil> ClusterFirst map[] <nil> false false false <nil> 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil> nil <nil> [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} Jan 14 06:38:38.570: INFO: Pod "test-recreate-deployment-795566c5cb-558bx" is not available: &Pod{ObjectMeta:{test-recreate-deployment-795566c5cb-558bx test-recreate-deployment-795566c5cb- deployment-6041 39ca7e73-1fd9-4966-a736-7d080659381f 12919 0 2023-01-14 06:33:37 +0000 UTC <nil> <nil> map[name:sample-pod-3 pod-template-hash:795566c5cb] map[] [{apps/v1 ReplicaSet test-recreate-deployment-795566c5cb 0370bd75-2432-4eab-8a8c-ebfc9bb4d475 0xc004072c67 0xc004072c68}] [] [{kube-controller-manager Update v1 2023-01-14 06:33:37 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0370bd75-2432-4eab-8a8c-ebfc9bb4d475\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-01-14 06:33:39 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-4xk6z,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Cla
ims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4xk6z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:i-0526f6963633e8375,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUser
s:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-01-14 06:33:37 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-01-14 06:33:37 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [agnhost],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-01-14 06:33:37 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [agnhost],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-01-14 06:33:37 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.20.59.50,PodIP:,StartTime:2023-01-14 06:33:37 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} [AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 Jan 14 06:38:38.570: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 �[1mSTEP:�[0m dump namespace information after failure �[38;5;243m01/14/23 06:38:38.675�[0m �[1mSTEP:�[0m Collecting events from namespace "deployment-6041". 
�[38;5;243m01/14/23 06:38:38.675�[0m �[1mSTEP:�[0m Found 13 events. �[38;5;243m01/14/23 06:38:38.78�[0m Jan 14 06:38:38.780: INFO: At 2023-01-14 06:33:37 +0000 UTC - event for test-recreate-deployment: {deployment-controller } ScalingReplicaSet: Scaled up replica set test-recreate-deployment-795566c5cb to 1 Jan 14 06:38:38.780: INFO: At 2023-01-14 06:33:37 +0000 UTC - event for test-recreate-deployment-795566c5cb: {replicaset-controller } SuccessfulCreate: Created pod: test-recreate-deployment-795566c5cb-558bx Jan 14 06:38:38.780: INFO: At 2023-01-14 06:33:37 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {default-scheduler } Scheduled: Successfully assigned deployment-6041/test-recreate-deployment-795566c5cb-558bx to i-0526f6963633e8375 Jan 14 06:38:38.780: INFO: At 2023-01-14 06:33:39 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bdeb9aac54d28016547ec3d2a33ec39f56121f6eef26a66db4f5587a962908cc": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.780: INFO: At 2023-01-14 06:33:50 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "484951659245e39ed8566734b5422f3fd25935449fdb99343340ca5303f2cf76": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.780: INFO: At 2023-01-14 06:34:04 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup 
network for sandbox "cf2ed10330fd7a1b6ebd8344624273e9633f3bb79dbbee274ccf0619ad240d23": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.780: INFO: At 2023-01-14 06:34:16 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5ec7a71324b9800e131a0dae38cd2506ea23f1fd88dff935c675c8e680492f2b": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.780: INFO: At 2023-01-14 06:34:27 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "0c30f6ceb33c110e9f274d77ceb15e1400ddff19186c8af940dac4000b1945e4": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.780: INFO: At 2023-01-14 06:34:39 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "264f6e1971fd78b8b35eb3bedeff52d5a45f35d32e7f040f68eb1f45e410f89e": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.780: INFO: At 2023-01-14 06:34:53 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox 
"e89bc4adb88e0297255b4260f1804b346d041d47bf5c745492c1bcd0681c4d7c": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.780: INFO: At 2023-01-14 06:35:04 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "af3c894cca5123c55450634d9e2a44bd390d650584fcb46ed3857ee30026edfb": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.780: INFO: At 2023-01-14 06:35:16 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "3b408700fe6126997b78d1f7a9bcf0bae23774dcad3c9ee05b62f1f1fdf10ad6": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.780: INFO: At 2023-01-14 06:35:29 +0000 UTC - event for test-recreate-deployment-795566c5cb-558bx: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "33211e58ab9c68dd3a9ed707a5a25162302a3c3a1f3df74c26a4b6e1d512c8e0": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:38:38.884: INFO: POD NODE PHASE GRACE CONDITIONS Jan 14 06:38:38.884: INFO: test-recreate-deployment-795566c5cb-558bx i-0526f6963633e8375 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:37 +0000 UTC } {Ready False 0001-01-01 00:00:00 
+0000 UTC 2023-01-14 06:33:37 +0000 UTC ContainersNotReady containers with unready status: [agnhost]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:37 +0000 UTC ContainersNotReady containers with unready status: [agnhost]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:37 +0000 UTC }] Jan 14 06:38:38.884: INFO: Jan 14 06:38:38.989: INFO: Unable to fetch deployment-6041/test-recreate-deployment-795566c5cb-558bx/agnhost logs: the server rejected our request for an unknown reason (get pods test-recreate-deployment-795566c5cb-558bx) Jan 14 06:38:39.094: INFO: Logging node info for node i-0526f6963633e8375 Jan 14 06:38:39.198: INFO: Node Info: &Node{ObjectMeta:{i-0526f6963633e8375 b8bbb07c-e234-4117-968a-d4f54d957b46 15086 0 2023-01-14 06:26:11 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a io.kubernetes.storage.mock/node:some-mock-node kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0526f6963633e8375 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0526f6963633e8375 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.59.50 csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-4581":"i-0526f6963633e8375","ebs.csi.aws.com":"i-0526f6963633e8375"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.4.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:38:03 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:io.kubernetes.storage.mock/node":{},"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.4.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0526f6963633e8375,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.4.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 
UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.59.50,},NodeAddress{Type:ExternalIP,Address:13.38.88.176,},NodeAddress{Type:InternalDNS,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-88-176.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec24f53d700a2e9399be7e5e2cc1e943,SystemUUID:ec24f53d-700a-2e93-99be-7e5e2cc1e943,BootID:58d231c1-9ab3-4e54-9948-319cfad92d73,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e 
registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 
registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[gcr.io/authenticated-image-pulling/alpine@sha256:7ff177862cb50c602bfe81f805969412e619c054a2bbead977d0c276988aa4a0 gcr.io/authenticated-image-pulling/alpine:3.7],SizeBytes:2110879,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:38:39.199: INFO: Logging kubelet events for node i-0526f6963633e8375 Jan 14 06:38:39.305: INFO: Logging pods the kubelet thinks is on node i-0526f6963633e8375 Jan 14 06:38:39.418: INFO: security-context-9dbfb503-c405-47ac-9294-6ea974357a5f started at 2023-01-14 06:38:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container test-container ready: false, restart count 0 Jan 14 06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-66vns started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:39.418: INFO: proxy-service-hs9zl-dsx96 started at 2023-01-14 06:34:03 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container proxy-service-hs9zl ready: false, restart count 0 Jan 14 06:38:39.418: INFO: 
csi-mockplugin-attacher-0 started at 2023-01-14 06:33:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8z92 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:39.418: INFO: test-recreate-deployment-795566c5cb-558bx started at 2023-01-14 06:33:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container agnhost ready: false, restart count 0 Jan 14 06:38:39.418: INFO: pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8 started at 2023-01-14 06:34:15 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container nginx ready: false, restart count 0 Jan 14 06:38:39.418: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:38:39.418: INFO: ebs-csi-node-r8qfk started at 2023-01-14 06:26:12 +0000 UTC (0+3 container statuses recorded) Jan 14 06:38:39.418: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:38:39.418: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:38:39.418: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:38:39.418: INFO: hostexec-i-0526f6963633e8375-kpqf6 started at 2023-01-14 06:33:54 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-mh56b started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:39.418: INFO: cilium-tv25q 
started at 2023-01-14 06:26:12 +0000 UTC (1+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:38:39.418: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:38:39.418: INFO: pod-subpath-test-preprovisionedpv-r2t6 started at 2023-01-14 06:34:12 +0000 UTC (0+2 container statuses recorded) Jan 14 06:38:39.418: INFO: Container test-container-subpath-preprovisionedpv-r2t6 ready: false, restart count 0 Jan 14 06:38:39.418: INFO: Container test-container-volume-preprovisionedpv-r2t6 ready: false, restart count 0 Jan 14 06:38:39.418: INFO: csi-mockplugin-0 started at 2023-01-14 06:35:25 +0000 UTC (0+3 container statuses recorded) Jan 14 06:38:39.418: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:38:39.418: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:38:39.418: INFO: Container mock ready: false, restart count 0 Jan 14 06:38:39.418: INFO: hostexec-i-0526f6963633e8375-7mnst started at 2023-01-14 06:32:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-v4q7f started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-l2cw9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-p4cww started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 
06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ts46q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:39.418: INFO: hostpath-symlink-prep-provisioning-1492 started at 2023-01-14 06:33:32 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container init-volume-provisioning-1492 ready: false, restart count 0 Jan 14 06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6xkx2 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-kvr9w started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: false, restart count 0 Jan 14 06:38:39.418: INFO: probe-test-f2f33560-aba7-4c26-b536-c1361b27b38a started at 2023-01-14 06:33:51 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container probe-test-f2f33560-aba7-4c26-b536-c1361b27b38a ready: false, restart count 0 Jan 14 06:38:39.418: INFO: pod-subpath-test-preprovisionedpv-2gt8 started at 2023-01-14 06:37:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container test-container-subpath-preprovisionedpv-2gt8 ready: false, restart count 0 Jan 14 06:38:39.418: INFO: csi-mockplugin-0 started at 2023-01-14 06:33:36 +0000 UTC (0+3 container statuses recorded) Jan 14 06:38:39.418: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:38:39.418: INFO: Container driver-registrar ready: true, restart count 0 Jan 14 06:38:39.418: INFO: Container mock ready: true, restart count 0 Jan 14 06:38:39.418: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-crc8k started 
at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:39.418: INFO: netserver-0 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:39.418: INFO: Container webserver ready: true, restart count 0 Jan 14 06:38:39.845: INFO: Latency metrics for node i-0526f6963633e8375 Jan 14 06:38:39.845: INFO: Logging node info for node i-06bd219a44e00580c Jan 14 06:38:39.949: INFO: Node Info: &Node{ObjectMeta:{i-06bd219a44e00580c c2a57daf-87e6-4c31-ab8d-158cf1752c85 14461 0 2023-01-14 06:26:09 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-06bd219a44e00580c kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-06bd219a44e00580c topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.61.252 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-06bd219a44e00580c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 
{"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.3.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:35:28 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:35:31 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} 
status}]},Spec:NodeSpec{PodCIDR:100.96.3.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-06bd219a44e00580c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.3.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:21 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.61.252,},NodeAddress{Type:ExternalIP,Address:15.237.110.205,},NodeAddress{Type:InternalDNS,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-110-205.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec28b615c9f51208890a610e546cafd1,SystemUUID:ec28b615-c9f5-1208-890a-610e546cafd1,BootID:9cfe2407-336f-468c-b599-1b87cbc71140,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e 
registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 
registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847 kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4 kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4,DevicePath:,},},Config:nil,},} Jan 14 06:38:39.949: INFO: Logging kubelet events for node i-06bd219a44e00580c Jan 14 06:38:40.056: INFO: Logging pods the kubelet thinks is on node i-06bd219a44e00580c Jan 14 06:38:40.169: INFO: pod-disruption-failure-ignore-1-xjff6 started at 2023-01-14 06:34:27 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container c ready: false, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6nwrq started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.169: INFO: pod-disruption-failure-ignore-1-6gwgt started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container c ready: false, restart count 0 Jan 14 06:38:40.169: INFO: 
pod-disruption-failure-ignore-1-ghlhh started at 2023-01-14 06:34:12 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container c ready: false, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zbhxz started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.169: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:13 +0000 UTC (0+7 container statuses recorded) Jan 14 06:38:40.169: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:38:40.169: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:38:40.169: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:38:40.169: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:38:40.169: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:38:40.169: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:38:40.169: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:38:40.169: INFO: cilium-k6c6s started at 2023-01-14 06:26:10 +0000 UTC (1+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:38:40.169: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q2x2n started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.169: INFO: ebs-csi-node-62qzb started at 2023-01-14 06:26:10 +0000 UTC (0+3 container statuses recorded) Jan 14 06:38:40.169: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:38:40.169: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:38:40.169: 
INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:38:40.169: INFO: netserver-1 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container webserver ready: true, restart count 0 Jan 14 06:38:40.169: INFO: pod-disruption-failure-ignore-1-crpgt started at 2023-01-14 06:34:01 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container c ready: false, restart count 0 Jan 14 06:38:40.169: INFO: pod-disruption-failure-ignore-1-5xwhv started at 2023-01-14 06:34:05 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container c ready: false, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-cf86c started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.169: INFO: inline-volume-tester-2fmpv started at 2023-01-14 06:34:58 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k776q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-gsthr started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.169: INFO: inline-volume-tester-v5nnb started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:38:40.169: INFO: pod-disruption-failure-ignore-1-frbqx started at 2023-01-14 
06:34:08 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container c ready: false, restart count 0 Jan 14 06:38:40.169: INFO: test-ss-0 started at 2023-01-14 06:34:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container webserver ready: false, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-jzs4v started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dbmt8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.169: INFO: pod-disruption-failure-ignore-0-dbk78 started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container c ready: true, restart count 0 Jan 14 06:38:40.169: INFO: var-expansion-5d68387b-eca3-4e4a-a1bc-06454f2a1ada started at 2023-01-14 06:35:23 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container dapi-container ready: false, restart count 0 Jan 14 06:38:40.169: INFO: pod-disruption-failure-ignore-1-f5m4k started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container c ready: false, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rlwrq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.169: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ztghd started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.169: INFO: Container 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.584: INFO: Latency metrics for node i-06bd219a44e00580c Jan 14 06:38:40.584: INFO: Logging node info for node i-0930a50194a147b36 Jan 14 06:38:40.689: INFO: Node Info: &Node{ObjectMeta:{i-0930a50194a147b36 4316b3c5-1eeb-4ee2-9818-40f99d51117d 14934 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0930a50194a147b36 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0930a50194a147b36 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.36.60 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0930a50194a147b36"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.1.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:37:39 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.1.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0930a50194a147b36,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.1.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 
DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:19 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.36.60,},NodeAddress{Type:ExternalIP,Address:15.237.49.122,},NodeAddress{Type:InternalDNS,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-49-122.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2ac5dccb44f409fdc575df19a0b9a7,SystemUUID:ec2ac5dc-cb44-f409-fdc5-75df19a0b9a7,BootID:9dff06f2-e51d-4b5e-a657-e8f546eded95,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:68d396900aeaa072c1f27289485fdac29834045a6f3ffe369bf389d830ef572d registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.6],SizeBytes:20293261,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:38:40.689: INFO: Logging kubelet events for node i-0930a50194a147b36 Jan 14 06:38:40.796: INFO: Logging pods the kubelet thinks is on node i-0930a50194a147b36 Jan 14 06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rfg9z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-xxqp8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 
06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q9h4x started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.908: INFO: hostexec-i-0930a50194a147b36-sqt7d started at 2023-01-14 06:33:49 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dw26q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.908: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:37:43 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:38:40.908: INFO: cilium-75rxm started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:38:40.908: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-bb7qt started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g4xd9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.908: INFO: pod-subpath-test-preprovisionedpv-ck7k started at 2023-01-14 06:33:56 +0000 UTC (2+2 container statuses recorded) Jan 14 06:38:40.908: INFO: Init container init-volume-preprovisionedpv-ck7k 
ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Init container test-init-subpath-preprovisionedpv-ck7k ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container test-container-subpath-preprovisionedpv-ck7k ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container test-container-volume-preprovisionedpv-ck7k ready: false, restart count 0 Jan 14 06:38:40.908: INFO: ebs-csi-node-rpzft started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:38:40.908: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:38:40.908: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:38:40.908: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-pkvpm started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.908: INFO: coredns-559769c974-5xkn6 started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container coredns ready: true, restart count 0 Jan 14 06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g6wnp started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zjjvx started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:40.908: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-slt2z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart 
count 0 Jan 14 06:38:40.908: INFO: coredns-autoscaler-7cb5c5b969-svc7j started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container autoscaler ready: true, restart count 0 Jan 14 06:38:40.908: INFO: netserver-2 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:40.908: INFO: Container webserver ready: false, restart count 0 Jan 14 06:38:40.908: INFO: csi-mockplugin-0 started at 2023-01-14 06:37:42 +0000 UTC (0+3 container statuses recorded) Jan 14 06:38:40.908: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container mock ready: false, restart count 0 Jan 14 06:38:40.908: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:33:36 +0000 UTC (0+7 container statuses recorded) Jan 14 06:38:40.908: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:38:40.908: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:38:41.332: INFO: Latency metrics for node i-0930a50194a147b36 Jan 14 06:38:41.332: INFO: Logging node info for node i-095cd924e787c9946 Jan 14 06:38:41.437: INFO: Node Info: &Node{ObjectMeta:{i-095cd924e787c9946 7ac98e5e-c131-42e0-a67e-ba9b45d163a4 15074 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a 
kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-095cd924e787c9946 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-095cd924e787c9946 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.51.27 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-095cd924e787c9946"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.2.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:38:00 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.2.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-095cd924e787c9946,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.2.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 
UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:20 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.51.27,},NodeAddress{Type:ExternalIP,Address:13.38.27.88,},NodeAddress{Type:InternalDNS,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-27-88.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2449c051467854b20245f8e87294d1,SystemUUID:ec2449c0-5146-7854-b202-45f8e87294d1,BootID:11ed24c0-6b48-4372-960a-a4095c73f4ca,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c 
registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/sample-apiserver@sha256:8d70890151aa5d096f331cb9da1b9cd5be0412b7363fe67b5c3befdcaa2a28d0 registry.k8s.io/e2e-test-images/sample-apiserver:1.17.7],SizeBytes:25667066,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:38:41.437: INFO: Logging kubelet events for node i-095cd924e787c9946 Jan 14 06:38:41.543: INFO: Logging pods the kubelet thinks is on node i-095cd924e787c9946 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-d9fvw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container 
statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-8x9bt started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ppzfj started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:41.656: INFO: coredns-559769c974-lpb2c started at 2023-01-14 06:26:42 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container coredns ready: true, restart count 0 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k5vzv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:41.656: INFO: hostexec-i-095cd924e787c9946-c5rkj started at 2023-01-14 06:33:59 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:38:41.656: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:34 +0000 UTC (0+7 container statuses recorded) Jan 14 06:38:41.656: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:38:41.656: INFO: 
Container node-driver-registrar ready: false, restart count 0 Jan 14 06:38:41.656: INFO: csi-mockplugin-resizer-0 started at 2023-01-14 06:33:29 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-79v7d started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:41.656: INFO: ebs-csi-node-q6j9r started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:38:41.656: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:38:41.656: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:38:41.656: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-hghxq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:41.656: INFO: hostexec-i-095cd924e787c9946-xb47h started at 2023-01-14 06:33:55 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:38:41.656: INFO: pod-subpath-test-preprovisionedpv-mhww started at 2023-01-14 06:34:11 +0000 UTC (2+2 container statuses recorded) Jan 14 06:38:41.656: INFO: Init container init-volume-preprovisionedpv-mhww ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Init container test-init-subpath-preprovisionedpv-mhww ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container test-container-subpath-preprovisionedpv-mhww ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container test-container-volume-preprovisionedpv-mhww ready: false, restart count 0 Jan 14 06:38:41.656: INFO: 
cilium-kpqdf started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:38:41.656: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:38:41.656: INFO: netserver-3 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container webserver ready: true, restart count 0 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-qnwnv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:41.656: INFO: csi-mockplugin-0 started at 2023-01-14 06:33:29 +0000 UTC (0+3 container statuses recorded) Jan 14 06:38:41.656: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container mock ready: false, restart count 0 Jan 14 06:38:41.656: INFO: test-pod started at 2023-01-14 06:38:30 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container webserver ready: false, restart count 0 Jan 14 06:38:41.656: INFO: pod-subpath-test-preprovisionedpv-d4th started at 2023-01-14 06:34:13 +0000 UTC (0+2 container statuses recorded) Jan 14 06:38:41.656: INFO: Container test-container-subpath-preprovisionedpv-d4th ready: false, restart count 0 Jan 14 06:38:41.656: INFO: Container test-container-volume-preprovisionedpv-d4th ready: false, restart count 0 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8pkw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-7bdwh started at 2023-01-14 
06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: false, restart count 0 Jan 14 06:38:41.656: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-94lp8 started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:41.656: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:38:42.034: INFO: Latency metrics for node i-095cd924e787c9946 Jan 14 06:38:42.034: INFO: Logging node info for node i-0ea715ad3f7d7c666 Jan 14 06:38:42.138: INFO: Node Info: &Node{ObjectMeta:{i-0ea715ad3f7d7c666 1b9ffdb4-6e31-4298-bf35-45383b8cddd4 14687 0 2023-01-14 06:24:19 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:c5.large beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kops.k8s.io/kops-controller-pki: kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0ea715ad3f7d7c666 kubernetes.io/os:linux node-role.kubernetes.io/control-plane: node.kubernetes.io/exclude-from-external-load-balancers: node.kubernetes.io/instance-type:c5.large topology.ebs.csi.aws.com/zone:eu-west-3a topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.43.108 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0ea715ad3f7d7c666"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2023-01-14 06:24:19 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {protokube Update v1 2023-01-14 06:24:47 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{"f:kops.k8s.io/kops-controller-pki":{},"f:node-role.kubernetes.io/control-plane":{},"f:node.kubernetes.io/exclude-from-external-load-balancers":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:25:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.0.0/24\"":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:taints":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kubelet Update v1 2023-01-14 06:36:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} 
status}]},Spec:NodeSpec{PodCIDR:100.96.0.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0ea715ad3f7d7c666,Unschedulable:false,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/control-plane,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[100.96.0.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3892301824 0} {<nil>} 3801076Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3787444224 0} {<nil>} 3698676Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:25:06 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.43.108,},NodeAddress{Type:ExternalIP,Address:13.37.224.194,},NodeAddress{Type:InternalDNS,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-37-224-194.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec29ac40ecac712560f472ac147406f5,SystemUUID:ec29ac40-ecac-7125-60f4-72ac147406f5,BootID:6aefaddb-a8fb-42ca-b933-086be838242c,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/etcdadm/etcd-manager@sha256:66a453db625abb268f4b3bbefc5a34a171d81e6e8796cecca54cfd71775c77c4 registry.k8s.io/etcdadm/etcd-manager:v3.0.20221209],SizeBytes:231502799,},ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.26.0],SizeBytes:135162323,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.26.0],SizeBytes:124991801,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.26.0],SizeBytes:57657656,},ContainerImage{Names:[registry.k8s.io/kops/kops-controller:1.27.0-alpha.1],SizeBytes:43455400,},ContainerImage{Names:[registry.k8s.io/kops/dns-controller:1.27.0-alpha.1],SizeBytes:42802033,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e 
registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[quay.io/cilium/operator@sha256:a6d24a006a6b92967ac90786b49bc1ac26e5477cf028cd1186efcfc2466484db quay.io/cilium/operator:v1.12.5],SizeBytes:26802430,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119 registry.k8s.io/sig-storage/csi-provisioner:v3.1.0],SizeBytes:23345856,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4 registry.k8s.io/sig-storage/csi-resizer:v1.4.0],SizeBytes:22381475,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b registry.k8s.io/sig-storage/csi-attacher:v3.4.0],SizeBytes:22085298,},ContainerImage{Names:[registry.k8s.io/provider-aws/cloud-controller-manager@sha256:fdeb61e3e42ecd9cca868d550ebdb88dd6341d9e91fcfa9a37e227dab2ad22cb registry.k8s.io/provider-aws/cloud-controller-manager:v1.26.0],SizeBytes:20154862,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/kops/kube-apiserver-healthcheck:1.27.0-alpha.1],SizeBytes:4967345,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:38:42.138: INFO: Logging kubelet events for node i-0ea715ad3f7d7c666 Jan 14 06:38:42.245: INFO: Logging pods the 
kubelet thinks is on node i-0ea715ad3f7d7c666 Jan 14 06:38:42.356: INFO: aws-cloud-controller-manager-8g49k started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:42.356: INFO: Container aws-cloud-controller-manager ready: true, restart count 0 Jan 14 06:38:42.356: INFO: cilium-operator-5dd44dc49f-hdhf7 started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:42.356: INFO: Container cilium-operator ready: true, restart count 0 Jan 14 06:38:42.356: INFO: etcd-manager-events-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:42.356: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:38:42.356: INFO: kops-controller-8ntms started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:42.356: INFO: Container kops-controller ready: true, restart count 0 Jan 14 06:38:42.356: INFO: kube-controller-manager-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:42.356: INFO: Container kube-controller-manager ready: true, restart count 2 Jan 14 06:38:42.356: INFO: kube-scheduler-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:42.356: INFO: Container kube-scheduler ready: true, restart count 0 Jan 14 06:38:42.356: INFO: cilium-vl5tq started at 2023-01-14 06:24:57 +0000 UTC (1+1 container statuses recorded) Jan 14 06:38:42.356: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:38:42.356: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:38:42.356: INFO: ebs-csi-node-knngk started at 2023-01-14 06:24:57 +0000 UTC (0+3 container statuses recorded) Jan 14 06:38:42.356: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:38:42.356: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:38:42.356: INFO: Container node-driver-registrar 
ready: true, restart count 0 Jan 14 06:38:42.356: INFO: dns-controller-69987775c6-66b5p started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:42.356: INFO: Container dns-controller ready: true, restart count 0 Jan 14 06:38:42.356: INFO: ebs-csi-controller-5bd98b456f-zxg2l started at 2023-01-14 06:24:57 +0000 UTC (0+5 container statuses recorded) Jan 14 06:38:42.356: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:38:42.356: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:38:42.356: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 06:38:42.356: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:38:42.356: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:38:42.356: INFO: etcd-manager-main-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:38:42.356: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:38:42.356: INFO: kube-apiserver-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+2 container statuses recorded) Jan 14 06:38:42.356: INFO: Container healthcheck ready: true, restart count 0 Jan 14 06:38:42.356: INFO: Container kube-apiserver ready: true, restart count 1 Jan 14 06:38:42.728: INFO: Latency metrics for node i-0ea715ad3f7d7c666 [DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 �[1mSTEP:�[0m Destroying namespace "deployment-6041" for this suite. �[38;5;243m01/14/23 06:38:42.728�[0m
Filter through log files | View test history on testgrid
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sStatefulSet\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sShould\srecreate\sevicted\sstatefulset\s\[Conformance\]$'
test/e2e/apps/statefulset.go:767 k8s.io/kubernetes/test/e2e/apps.glob..func10.2.12() test/e2e/apps/statefulset.go:767 +0x60c from junit_01.xml
[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 �[1mSTEP:�[0m Creating a kubernetes client �[38;5;243m01/14/23 06:38:29.935�[0m Jan 14 06:38:29.935: INFO: >>> kubeConfig: /root/.kube/config �[1mSTEP:�[0m Building a namespace api object, basename statefulset �[38;5;243m01/14/23 06:38:29.937�[0m �[1mSTEP:�[0m Waiting for a default service account to be provisioned in namespace �[38;5;243m01/14/23 06:38:30.251�[0m �[1mSTEP:�[0m Waiting for kube-root-ca.crt to be provisioned in namespace �[38;5;243m01/14/23 06:38:30.458�[0m [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-apps] StatefulSet test/e2e/apps/statefulset.go:98 [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:113 �[1mSTEP:�[0m Creating service test in namespace statefulset-6824 �[38;5;243m01/14/23 06:38:30.665�[0m [It] Should recreate evicted statefulset [Conformance] test/e2e/apps/statefulset.go:739 �[1mSTEP:�[0m Looking for a node to schedule stateful set and pod �[38;5;243m01/14/23 06:38:30.771�[0m �[1mSTEP:�[0m Creating pod with conflicting port in namespace statefulset-6824 �[38;5;243m01/14/23 06:38:30.878�[0m �[1mSTEP:�[0m Waiting until pod test-pod will start running in namespace statefulset-6824 �[38;5;243m01/14/23 06:38:30.986�[0m Jan 14 06:38:30.986: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "statefulset-6824" to be "running" Jan 14 06:38:31.090: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 104.344768ms Jan 14 06:38:33.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.208971915s Jan 14 06:38:35.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4.209257716s Jan 14 06:38:37.194: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.208728685s Jan 14 06:38:39.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.210270167s Jan 14 06:38:41.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 10.209539316s Jan 14 06:38:43.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 12.209068956s Jan 14 06:38:45.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 14.20945228s Jan 14 06:38:47.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 16.209293948s Jan 14 06:38:49.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 18.209909736s Jan 14 06:38:51.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 20.209209985s Jan 14 06:38:53.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 22.209706945s Jan 14 06:38:55.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 24.209427385s Jan 14 06:38:57.194: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 26.208724992s Jan 14 06:38:59.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 28.2096233s Jan 14 06:39:01.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 30.209578913s Jan 14 06:39:03.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 32.209658448s Jan 14 06:39:05.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 34.209860948s Jan 14 06:39:07.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 36.209386788s Jan 14 06:39:09.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 38.209937409s Jan 14 06:39:11.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 40.209323841s Jan 14 06:39:13.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 42.208900856s Jan 14 06:39:15.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 44.209341046s Jan 14 06:39:17.194: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 46.208521201s Jan 14 06:39:19.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 48.209766194s Jan 14 06:39:21.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 50.209746077s Jan 14 06:39:23.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 52.209317455s Jan 14 06:39:25.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 54.209245499s Jan 14 06:39:27.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 56.209695599s Jan 14 06:39:29.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 58.21057619s Jan 14 06:39:31.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.209077783s Jan 14 06:39:33.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.208960305s Jan 14 06:39:35.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.209605882s Jan 14 06:39:37.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.208940373s Jan 14 06:39:39.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.209713467s Jan 14 06:39:41.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.209661624s Jan 14 06:39:43.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.209571567s Jan 14 06:39:45.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.209851824s Jan 14 06:39:47.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.209166779s Jan 14 06:39:49.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m18.208937584s Jan 14 06:39:51.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.209628177s Jan 14 06:39:53.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.209613561s Jan 14 06:39:55.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.20962692s Jan 14 06:39:57.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.208904774s Jan 14 06:39:59.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.209157233s Jan 14 06:40:01.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.209707618s Jan 14 06:40:03.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.209248421s Jan 14 06:40:05.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.209866369s Jan 14 06:40:07.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.209448548s Jan 14 06:40:09.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.209299585s Jan 14 06:40:11.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.209713727s Jan 14 06:40:13.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.209063259s Jan 14 06:40:15.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.209349286s Jan 14 06:40:17.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.209217734s Jan 14 06:40:19.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.209280102s Jan 14 06:40:21.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.209540185s Jan 14 06:40:23.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m52.20930228s Jan 14 06:40:25.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.209085641s Jan 14 06:40:27.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.209254878s Jan 14 06:40:29.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.209361747s Jan 14 06:40:31.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.209313742s Jan 14 06:40:33.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m2.209363065s Jan 14 06:40:35.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m4.209497317s Jan 14 06:40:37.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m6.209045758s Jan 14 06:40:39.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m8.208880345s Jan 14 06:40:41.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m10.209355036s Jan 14 06:40:43.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m12.209668925s Jan 14 06:40:45.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m14.209349022s Jan 14 06:40:47.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m16.209199044s Jan 14 06:40:49.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m18.209244729s Jan 14 06:40:51.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m20.209509178s Jan 14 06:40:53.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m22.209213178s Jan 14 06:40:55.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m24.209408369s Jan 14 06:40:57.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m26.209134768s Jan 14 06:40:59.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m28.209386224s Jan 14 06:41:01.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m30.2095125s Jan 14 06:41:03.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m32.208844879s Jan 14 06:41:05.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m34.209290756s Jan 14 06:41:07.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m36.209102595s Jan 14 06:41:09.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m38.209375486s Jan 14 06:41:11.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m40.209647544s Jan 14 06:41:13.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m42.2092256s Jan 14 06:41:15.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m44.209392592s Jan 14 06:41:17.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m46.209270304s Jan 14 06:41:19.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m48.209241144s Jan 14 06:41:21.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m50.209246754s Jan 14 06:41:23.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m52.209359975s Jan 14 06:41:25.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m54.209332238s Jan 14 06:41:27.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m56.209269157s Jan 14 06:41:29.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2m58.209491043s Jan 14 06:41:31.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m0.209485789s Jan 14 06:41:33.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m2.209313238s Jan 14 06:41:35.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m4.209602753s Jan 14 06:41:37.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m6.209293897s Jan 14 06:41:39.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m8.209511716s Jan 14 06:41:41.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m10.209279558s Jan 14 06:41:43.194: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m12.208732521s Jan 14 06:41:45.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m14.209686559s Jan 14 06:41:47.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m16.20887443s Jan 14 06:41:49.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m18.209208568s Jan 14 06:41:51.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m20.209164661s Jan 14 06:41:53.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m22.209219098s Jan 14 06:41:55.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m24.209973648s Jan 14 06:41:57.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m26.208820926s Jan 14 06:41:59.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m28.209498629s Jan 14 06:42:01.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m30.209587927s Jan 14 06:42:03.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m32.209556704s Jan 14 06:42:05.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m34.209593027s Jan 14 06:42:07.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m36.208884776s Jan 14 06:42:09.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m38.2087853s Jan 14 06:42:11.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m40.209697557s Jan 14 06:42:13.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m42.20933851s Jan 14 06:42:15.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m44.209785493s Jan 14 06:42:17.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m46.209526293s Jan 14 06:42:19.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m48.209711891s Jan 14 06:42:21.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m50.209247939s Jan 14 06:42:23.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m52.208904873s Jan 14 06:42:25.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m54.209726547s Jan 14 06:42:27.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m56.209094745s Jan 14 06:42:29.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 3m58.209715125s Jan 14 06:42:31.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m0.209675154s Jan 14 06:42:33.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m2.20944761s Jan 14 06:42:35.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m4.209855939s Jan 14 06:42:37.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m6.209645144s Jan 14 06:42:39.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m8.209588115s Jan 14 06:42:41.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m10.209468581s Jan 14 06:42:43.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m12.20924791s Jan 14 06:42:45.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m14.209650993s Jan 14 06:42:47.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m16.209507695s Jan 14 06:42:49.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m18.208973369s Jan 14 06:42:51.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m20.209464219s Jan 14 06:42:53.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m22.209716503s Jan 14 06:42:55.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m24.209065381s Jan 14 06:42:57.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m26.209553907s Jan 14 06:42:59.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m28.209885949s Jan 14 06:43:01.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m30.209102166s Jan 14 06:43:03.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m32.209530039s Jan 14 06:43:05.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m34.209519388s Jan 14 06:43:07.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m36.209095806s Jan 14 06:43:09.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m38.20950543s Jan 14 06:43:11.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m40.209745205s Jan 14 06:43:13.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m42.209721084s Jan 14 06:43:15.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m44.209587458s Jan 14 06:43:17.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m46.208958566s Jan 14 06:43:19.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m48.209271364s Jan 14 06:43:21.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m50.20979568s Jan 14 06:43:23.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m52.208831683s Jan 14 06:43:25.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m54.209103463s Jan 14 06:43:27.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m56.208980539s Jan 14 06:43:29.195: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4m58.209727075s Jan 14 06:43:31.196: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.209778428s Jan 14 06:43:31.300: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5m0.314088751s Jan 14 06:43:31.300: FAIL: Pod test-pod did not start running: timed out while waiting for pod statefulset-6824/test-pod to be running Full Stack Trace k8s.io/kubernetes/test/e2e/apps.glob..func10.2.12() test/e2e/apps/statefulset.go:767 +0x60c [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:124 Jan 14 06:43:31.405: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=statefulset-6824 describe po test-pod' Jan 14 06:43:32.016: INFO: stderr: "" Jan 14 06:43:32.016: INFO: stdout: "Name: test-pod\nNamespace: statefulset-6824\nPriority: 0\nService Account: default\nNode: i-095cd924e787c9946/172.20.51.27\nStart Time: Sat, 14 Jan 2023 06:38:30 +0000\nLabels: <none>\nAnnotations: <none>\nStatus: Pending\nIP: \nIPs: <none>\nContainers:\n webserver:\n Container ID: \n Image: registry.k8s.io/e2e-test-images/httpd:2.4.38-4\n Image ID: \n Port: 21017/TCP\n Host Port: 21017/TCP\n State: Waiting\n Reason: ContainerCreating\n Ready: False\n Restart Count: 0\n Environment: <none>\n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-58ftd (ro)\nConditions:\n Type Status\n Initialized True \n Ready False \n ContainersReady False \n PodScheduled True \nVolumes:\n kube-api-access-58ftd:\n Type: Projected (a volume that contains injected data from multiple sources)\n TokenExpirationSeconds: 3607\n ConfigMapName: kube-root-ca.crt\n ConfigMapOptional: <nil>\n DownwardAPI: true\nQoS Class: BestEffort\nNode-Selectors: <none>\nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Warning FailedCreatePodSandBox 5m1s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc 
= failed to setup network for sandbox \"a44096e624acd8df1eb3725583e3d8815103465844be75d85bf905045588d32d\": plugin type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n Warning FailedCreatePodSandBox 4m47s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox \"794b292dc286a9ec3f7142d0ee03355a691053b383e5a2a2e660d0a20a53c82a\": plugin type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n Warning FailedCreatePodSandBox 4m34s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox \"14284915823ccb1a359a5d122a6eeb5114e8141e63cd5043bdeaabf9fe7e909d\": plugin type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n Warning FailedCreatePodSandBox 4m21s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox \"aebf37fc29a3b505542161bd8fb577f87b7c804c0cb9daeb34159fd738567d06\": plugin type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n Warning FailedCreatePodSandBox 4m8s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox \"cae7202a12fd10c755e6206ee6ed567510c1908bcedf0b8f7b79f0810e285150\": plugin type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n Warning FailedCreatePodSandBox 3m54s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox \"4581f717a659005f63704e6e96f85d391333bb83b49d3f47f0fc2dc10f0dd745\": plugin 
type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n Warning FailedCreatePodSandBox 3m39s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox \"e62c541dd1f056be8d627e57c24c2aacc576d4e4261d11c8c760b87c4c9de38b\": plugin type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n Warning FailedCreatePodSandBox 3m26s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox \"3e1e2ce255504b8dfe83af28d0741ce29a4e77c1563d846767f30f11996fc58d\": plugin type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n Warning FailedCreatePodSandBox 3m14s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox \"92151def47e1c93bb695b637dd34f9dfb10de87b2fdeeb31eb488932a3c90e6b\": plugin type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n Warning FailedCreatePodSandBox 11s (x14 over 3m) kubelet (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox \"093f7a145a4f23d5f72170e2fbca0009e896ebd05e9d581d5e946f4d4331f9f3\": plugin type=\"cilium-cni\" name=\"cilium\" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available\n" Jan 14 06:43:32.016: INFO: Output of kubectl describe test-pod: Name: test-pod Namespace: statefulset-6824 Priority: 0 Service Account: default Node: i-095cd924e787c9946/172.20.51.27 Start Time: Sat, 14 Jan 2023 06:38:30 +0000 Labels: <none> Annotations: <none> Status: Pending IP: IPs: <none> 
Containers: webserver: Container ID: Image: registry.k8s.io/e2e-test-images/httpd:2.4.38-4 Image ID: Port: 21017/TCP Host Port: 21017/TCP State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: <none> Mounts: /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-58ftd (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: kube-api-access-58ftd: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: <nil> DownwardAPI: true QoS Class: BestEffort Node-Selectors: <none> Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning FailedCreatePodSandBox 5m1s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a44096e624acd8df1eb3725583e3d8815103465844be75d85bf905045588d32d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Warning FailedCreatePodSandBox 4m47s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "794b292dc286a9ec3f7142d0ee03355a691053b383e5a2a2e660d0a20a53c82a": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Warning FailedCreatePodSandBox 4m34s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "14284915823ccb1a359a5d122a6eeb5114e8141e63cd5043bdeaabf9fe7e909d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Warning FailedCreatePodSandBox 
4m21s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "aebf37fc29a3b505542161bd8fb577f87b7c804c0cb9daeb34159fd738567d06": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Warning FailedCreatePodSandBox 4m8s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "cae7202a12fd10c755e6206ee6ed567510c1908bcedf0b8f7b79f0810e285150": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Warning FailedCreatePodSandBox 3m54s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4581f717a659005f63704e6e96f85d391333bb83b49d3f47f0fc2dc10f0dd745": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Warning FailedCreatePodSandBox 3m39s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e62c541dd1f056be8d627e57c24c2aacc576d4e4261d11c8c760b87c4c9de38b": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Warning FailedCreatePodSandBox 3m26s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "3e1e2ce255504b8dfe83af28d0741ce29a4e77c1563d846767f30f11996fc58d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Warning FailedCreatePodSandBox 3m14s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox 
"92151def47e1c93bb695b637dd34f9dfb10de87b2fdeeb31eb488932a3c90e6b": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Warning FailedCreatePodSandBox 11s (x14 over 3m) kubelet (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "093f7a145a4f23d5f72170e2fbca0009e896ebd05e9d581d5e946f4d4331f9f3": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:32.017: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=statefulset-6824 logs test-pod --tail=100' Jan 14 06:43:32.523: INFO: rc: 1 Jan 14 06:43:32.523: INFO: Last 100 log lines of test-pod: Jan 14 06:43:32.523: INFO: Deleting all statefulset in ns statefulset-6824 [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 Jan 14 06:43:32.835: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 �[1mSTEP:�[0m dump namespace information after failure �[38;5;243m01/14/23 06:43:32.94�[0m �[1mSTEP:�[0m Collecting events from namespace "statefulset-6824". �[38;5;243m01/14/23 06:43:32.941�[0m �[1mSTEP:�[0m Found 10 events. 
�[38;5;243m01/14/23 06:43:33.045�[0m Jan 14 06:43:33.045: INFO: At 2023-01-14 06:38:31 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a44096e624acd8df1eb3725583e3d8815103465844be75d85bf905045588d32d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.045: INFO: At 2023-01-14 06:38:45 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "794b292dc286a9ec3f7142d0ee03355a691053b383e5a2a2e660d0a20a53c82a": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.045: INFO: At 2023-01-14 06:38:58 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "14284915823ccb1a359a5d122a6eeb5114e8141e63cd5043bdeaabf9fe7e909d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.045: INFO: At 2023-01-14 06:39:11 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "aebf37fc29a3b505542161bd8fb577f87b7c804c0cb9daeb34159fd738567d06": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.045: INFO: At 2023-01-14 06:39:24 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} 
FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "cae7202a12fd10c755e6206ee6ed567510c1908bcedf0b8f7b79f0810e285150": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.045: INFO: At 2023-01-14 06:39:38 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4581f717a659005f63704e6e96f85d391333bb83b49d3f47f0fc2dc10f0dd745": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.045: INFO: At 2023-01-14 06:39:53 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e62c541dd1f056be8d627e57c24c2aacc576d4e4261d11c8c760b87c4c9de38b": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.045: INFO: At 2023-01-14 06:40:06 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "3e1e2ce255504b8dfe83af28d0741ce29a4e77c1563d846767f30f11996fc58d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.045: INFO: At 2023-01-14 06:40:18 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox 
"92151def47e1c93bb695b637dd34f9dfb10de87b2fdeeb31eb488932a3c90e6b": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.045: INFO: At 2023-01-14 06:40:32 +0000 UTC - event for test-pod: {kubelet i-095cd924e787c9946} FailedCreatePodSandBox: (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "2283377e3d9d84264c6fcc3790c12b72cbc11a3e6b7393dc50b993edb0e78b2b": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:33.149: INFO: POD NODE PHASE GRACE CONDITIONS Jan 14 06:43:33.149: INFO: test-pod i-095cd924e787c9946 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:38:30 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:38:30 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:38:30 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:38:30 +0000 UTC }] Jan 14 06:43:33.149: INFO: Jan 14 06:43:33.259: INFO: Unable to fetch statefulset-6824/test-pod/webserver logs: the server rejected our request for an unknown reason (get pods test-pod) Jan 14 06:43:33.364: INFO: Logging node info for node i-0526f6963633e8375 Jan 14 06:43:33.468: INFO: Node Info: &Node{ObjectMeta:{i-0526f6963633e8375 b8bbb07c-e234-4117-968a-d4f54d957b46 16511 0 2023-01-14 06:26:11 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a io.kubernetes.storage.mock/node:some-mock-node kubelet_cleanup:true 
kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0526f6963633e8375 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0526f6963633e8375 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.59.50 csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-4581":"i-0526f6963633e8375","ebs.csi.aws.com":"i-0526f6963633e8375"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.4.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:39:21 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:41:47 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:io.kubernetes.storage.mock/node":{},"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.4.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0526f6963633e8375,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.4.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:47 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:47 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:47 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:41:47 +0000 UTC,LastTransitionTime:2023-01-14 06:26:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.59.50,},NodeAddress{Type:ExternalIP,Address:13.38.88.176,},NodeAddress{Type:InternalDNS,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-88-176.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec24f53d700a2e9399be7e5e2cc1e943,SystemUUID:ec24f53d-700a-2e93-99be-7e5e2cc1e943,BootID:58d231c1-9ab3-4e54-9948-319cfad92d73,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 
quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[gcr.io/authenticated-image-pulling/alpine@sha256:7ff177862cb50c602bfe81f805969412e619c054a2bbead977d0c276988aa4a0 gcr.io/authenticated-image-pulling/alpine:3.7],SizeBytes:2110879,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db 
registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4581^2cadbc7d-93d6-11ed-858d-76d972479176],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4581^2cadbc7d-93d6-11ed-858d-76d972479176,DevicePath:,},},Config:nil,},} Jan 14 06:43:33.468: INFO: Logging kubelet events for node i-0526f6963633e8375 Jan 14 06:43:33.574: INFO: Logging pods the kubelet thinks is on node i-0526f6963633e8375 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6xkx2 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-kvr9w started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: false, restart count 0 Jan 14 06:43:33.685: INFO: suspend-false-to-true-cbsf6 started at 2023-01-14 06:39:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container c ready: false, restart count 0 Jan 14 06:43:33.685: INFO: csi-mockplugin-0 started at 2023-01-14 06:33:36 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:33.685: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:43:33.685: INFO: Container driver-registrar ready: true, restart count 0 Jan 14 06:43:33.685: INFO: Container mock ready: true, restart count 0 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-crc8k started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:33.685: INFO: pvc-volume-tester-lrm5k started at 2023-01-14 06:39:20 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: 
Container volume-tester ready: false, restart count 0 Jan 14 06:43:33.685: INFO: security-context-9dbfb503-c405-47ac-9294-6ea974357a5f started at 2023-01-14 06:38:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container test-container ready: false, restart count 0 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-66vns started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:33.685: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:33:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8z92 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:33.685: INFO: ebs-csi-node-r8qfk started at 2023-01-14 06:26:12 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:33.685: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:33.685: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:33.685: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-mh56b started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:33.685: INFO: update-demo-nautilus-mrn8r started at 2023-01-14 06:38:46 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container update-demo ready: true, restart count 0 Jan 14 06:43:33.685: INFO: cilium-tv25q started at 2023-01-14 06:26:12 +0000 UTC (1+1 container statuses recorded) Jan 14 
06:43:33.685: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:43:33.685: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:33.685: INFO: pod-projected-configmaps-bb73475c-05a7-4e4e-8b32-9886f2febf29 started at 2023-01-14 06:39:21 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container agnhost-container ready: false, restart count 0 Jan 14 06:43:33.685: INFO: bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb started at 2023-01-14 06:38:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb ready: false, restart count 0 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-v4q7f started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-l2cw9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-p4cww started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:33.685: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ts46q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:33.685: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.098: INFO: Latency metrics for node i-0526f6963633e8375 Jan 14 06:43:34.098: INFO: Logging node info for node i-06bd219a44e00580c Jan 14 06:43:34.202: INFO: Node Info: &Node{ObjectMeta:{i-06bd219a44e00580c 
c2a57daf-87e6-4c31-ab8d-158cf1752c85 16062 0 2023-01-14 06:26:09 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-06bd219a44e00580c kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-06bd219a44e00580c topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.61.252 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-06bd219a44e00580c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.3.0/24\"":{}}}} } {kubelet Update v1 
2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:35:28 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:40:38 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.3.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-06bd219a44e00580c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.3.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:21 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.61.252,},NodeAddress{Type:ExternalIP,Address:15.237.110.205,},NodeAddress{Type:InternalDNS,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-110-205.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec28b615c9f51208890a610e546cafd1,SystemUUID:ec28b615-c9f5-1208-890a-610e546cafd1,BootID:9cfe2407-336f-468c-b599-1b87cbc71140,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 
quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847 kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4 kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4,DevicePath:,},},Config:nil,},} Jan 14 06:43:34.202: INFO: Logging kubelet events for 
node i-06bd219a44e00580c Jan 14 06:43:34.309: INFO: Logging pods the kubelet thinks is on node i-06bd219a44e00580c Jan 14 06:43:34.425: INFO: pod-disruption-failure-ignore-1-ghlhh started at 2023-01-14 06:34:12 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container c ready: false, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zbhxz started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:13 +0000 UTC (0+7 container statuses recorded) Jan 14 06:43:34.425: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:43:34.425: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:43:34.425: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:43:34.425: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:43:34.425: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:43:34.425: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:43:34.425: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:43:34.425: INFO: cilium-k6c6s started at 2023-01-14 06:26:10 +0000 UTC (1+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:43:34.425: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:34.425: INFO: downwardapi-volume-75c6efde-4aed-47bb-8563-2d685d442490 started at 2023-01-14 06:39:23 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container client-container ready: false, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q2x2n started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: 
Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: ebs-csi-node-62qzb started at 2023-01-14 06:26:10 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:34.425: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:34.425: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:34.425: INFO: liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 started at 2023-01-14 06:39:09 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container agnhost-container ready: false, restart count 0 Jan 14 06:43:34.425: INFO: pod-disruption-failure-ignore-1-crpgt started at 2023-01-14 06:34:01 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container c ready: false, restart count 0 Jan 14 06:43:34.425: INFO: pod-edd099ae-7d89-4db8-9a86-b238ae68aba9 started at 2023-01-14 06:39:15 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container write-pod ready: true, restart count 0 Jan 14 06:43:34.425: INFO: pod-disruption-failure-ignore-1-5xwhv started at 2023-01-14 06:34:05 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container c ready: false, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-cf86c started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: inline-volume-tester-2fmpv started at 2023-01-14 06:34:58 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k776q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-gsthr started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: inline-volume-tester-v5nnb started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:43:34.425: INFO: pod-disruption-failure-ignore-1-frbqx started at 2023-01-14 06:34:08 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container c ready: false, restart count 0 Jan 14 06:43:34.425: INFO: test-ss-0 started at 2023-01-14 06:34:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container webserver ready: false, restart count 0 Jan 14 06:43:34.425: INFO: dns-test-1b49684b-e070-4d75-b586-93f0f22c501e started at 2023-01-14 06:39:25 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:34.425: INFO: Container jessie-querier ready: false, restart count 0 Jan 14 06:43:34.425: INFO: Container querier ready: false, restart count 0 Jan 14 06:43:34.425: INFO: Container webserver ready: false, restart count 0 Jan 14 06:43:34.425: INFO: suspend-false-to-true-w87tb started at 2023-01-14 06:39:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container c ready: false, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-jzs4v started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: pod-configmaps-fbe406e3-6009-41bb-911b-acb91067c9bd started at 2023-01-14 06:41:33 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container 
agnhost-container ready: false, restart count 0 Jan 14 06:43:34.425: INFO: hostexec-i-06bd219a44e00580c-vkgh8 started at 2023-01-14 06:39:10 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:43:34.425: INFO: pod-e030d7c2-b127-467e-8064-fd944a65c42f started at 2023-01-14 06:40:51 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container write-pod ready: false, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dbmt8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: pod-disruption-failure-ignore-0-dbk78 started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container c ready: true, restart count 0 Jan 14 06:43:34.425: INFO: pod-disruption-failure-ignore-1-f5m4k started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container c ready: false, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rlwrq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ztghd started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: update-demo-nautilus-wzc78 started at 2023-01-14 06:38:46 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container update-demo ready: false, restart count 0 Jan 14 06:43:34.425: INFO: pod-disruption-failure-ignore-1-xjff6 started at 
2023-01-14 06:34:27 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container c ready: false, restart count 0 Jan 14 06:43:34.425: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6nwrq started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:34.425: INFO: pod-disruption-failure-ignore-1-6gwgt started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container c ready: false, restart count 0 Jan 14 06:43:34.425: INFO: pod-db681d65-3e7a-406a-aa51-b57c4ce4869e started at 2023-01-14 06:39:09 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container test-container ready: false, restart count 0 Jan 14 06:43:34.425: INFO: pod-exec-websocket-7f4605b9-4d8c-489c-860b-2b70c44af97b started at 2023-01-14 06:41:15 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container main ready: false, restart count 0 Jan 14 06:43:34.425: INFO: busybox-a5ef1bf2-f510-44d1-a72c-3211b0bd56f9 started at 2023-01-14 06:41:33 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:34.425: INFO: Container busybox ready: false, restart count 0 Jan 14 06:43:34.879: INFO: Latency metrics for node i-06bd219a44e00580c Jan 14 06:43:34.879: INFO: Logging node info for node i-0930a50194a147b36 Jan 14 06:43:34.984: INFO: Node Info: &Node{ObjectMeta:{i-0930a50194a147b36 4316b3c5-1eeb-4ee2-9818-40f99d51117d 16789 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0930a50194a147b36 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium 
topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0930a50194a147b36 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.36.60 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0930a50194a147b36"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.1.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 
2023-01-14 06:42:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.1.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0930a50194a147b36,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.1.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:42:44 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:42:44 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:42:44 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 
UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:42:44 +0000 UTC,LastTransitionTime:2023-01-14 06:26:19 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.36.60,},NodeAddress{Type:ExternalIP,Address:15.237.49.122,},NodeAddress{Type:InternalDNS,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-49-122.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2ac5dccb44f409fdc575df19a0b9a7,SystemUUID:ec2ac5dc-cb44-f409-fdc5-75df19a0b9a7,BootID:9dff06f2-e51d-4b5e-a657-e8f546eded95,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:68d396900aeaa072c1f27289485fdac29834045a6f3ffe369bf389d830ef572d registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.6],SizeBytes:20293261,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 
registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:43:34.984: INFO: Logging kubelet events for node i-0930a50194a147b36 Jan 14 06:43:35.090: INFO: Logging pods the kubelet thinks is on node i-0930a50194a147b36 Jan 14 06:43:35.201: INFO: csi-mockplugin-0 started at 2023-01-14 06:37:42 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:35.201: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:43:35.201: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:43:35.201: INFO: Container mock ready: 
false, restart count 0 Jan 14 06:43:35.201: INFO: coredns-autoscaler-7cb5c5b969-svc7j started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.201: INFO: Container autoscaler ready: true, restart count 0 Jan 14 06:43:35.201: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-xxqp8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.201: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.201: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q9h4x started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.201: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.201: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rfg9z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.201: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.201: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:37:43 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.201: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:43:35.201: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:42:07 +0000 UTC (0+7 container statuses recorded) Jan 14 06:43:35.201: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:43:35.201: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:43:35.201: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:43:35.201: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:43:35.201: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:43:35.201: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:43:35.202: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:43:35.202: INFO: 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dw26q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.202: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.202: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-bb7qt started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.202: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.202: INFO: cilium-75rxm started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:43:35.202: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:43:35.202: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:35.202: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g4xd9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.202: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.202: INFO: ebs-csi-node-rpzft started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:35.202: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:35.202: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:35.202: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:35.202: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zjjvx started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.202: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.202: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-pkvpm started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.202: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.202: INFO: 
coredns-559769c974-5xkn6 started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.202: INFO: Container coredns ready: true, restart count 0 Jan 14 06:43:35.202: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g6wnp started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.202: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.202: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-slt2z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.202: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.596: INFO: Latency metrics for node i-0930a50194a147b36 Jan 14 06:43:35.596: INFO: Logging node info for node i-095cd924e787c9946 Jan 14 06:43:35.700: INFO: Node Info: &Node{ObjectMeta:{i-095cd924e787c9946 7ac98e5e-c131-42e0-a67e-ba9b45d163a4 16176 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-095cd924e787c9946 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-095cd924e787c9946 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.51.27 csi.volume.kubernetes.io/nodeid:{"csi-hostpath-ephemeral-9794":"i-095cd924e787c9946","ebs.csi.aws.com":"i-095cd924e787c9946"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.2.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:40:50 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:40:53 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.2.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-095cd924e787c9946,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.2.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:53 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:53 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:53 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 
UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:40:53 +0000 UTC,LastTransitionTime:2023-01-14 06:26:20 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.51.27,},NodeAddress{Type:ExternalIP,Address:13.38.27.88,},NodeAddress{Type:InternalDNS,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-27-88.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2449c051467854b20245f8e87294d1,SystemUUID:ec2449c0-5146-7854-b202-45f8e87294d1,BootID:11ed24c0-6b48-4372-960a-a4095c73f4ca,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/sample-apiserver@sha256:8d70890151aa5d096f331cb9da1b9cd5be0412b7363fe67b5c3befdcaa2a28d0 registry.k8s.io/e2e-test-images/sample-apiserver:1.17.7],SizeBytes:25667066,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 
registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-ephemeral-9794^61c31778-93d6-11ed-8af7-82bdff3b6028],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-ephemeral-9794^61c31778-93d6-11ed-8af7-82bdff3b6028,DevicePath:,},},Config:nil,},} Jan 14 06:43:35.700: INFO: Logging kubelet events for node i-095cd924e787c9946 Jan 14 06:43:35.806: INFO: Logging pods the kubelet thinks is on node i-095cd924e787c9946 Jan 14 06:43:35.917: INFO: ebs-csi-node-q6j9r started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 
06:43:35.917: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:35.917: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-hghxq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: csi-mockplugin-0 started at 2023-01-14 06:43:00 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:35.917: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:43:35.917: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:43:35.917: INFO: Container mock ready: false, restart count 0 Jan 14 06:43:35.917: INFO: cilium-kpqdf started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:43:35.917: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-qnwnv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:43:00 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8pkw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-7bdwh started at 2023-01-14 
06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-94lp8 started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: test-pod started at 2023-01-14 06:38:30 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container webserver ready: false, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-d9fvw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-8x9bt started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ppzfj started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: coredns-559769c974-lpb2c started at 2023-01-14 06:26:42 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container coredns ready: true, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k5vzv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: inline-volume-tester-xl4dq started at 2023-01-14 06:40:49 +0000 UTC (0+1 
container statuses recorded) Jan 14 06:43:35.917: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:43:35.917: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:34 +0000 UTC (0+7 container statuses recorded) Jan 14 06:43:35.917: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:43:35.917: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:43:35.917: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 06:43:35.917: INFO: Container csi-snapshotter ready: true, restart count 0 Jan 14 06:43:35.917: INFO: Container hostpath ready: true, restart count 0 Jan 14 06:43:35.917: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:35.917: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:35.917: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-79v7d started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:35.917: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:36.408: INFO: Latency metrics for node i-095cd924e787c9946 Jan 14 06:43:36.408: INFO: Logging node info for node i-0ea715ad3f7d7c666 Jan 14 06:43:36.512: INFO: Node Info: &Node{ObjectMeta:{i-0ea715ad3f7d7c666 1b9ffdb4-6e31-4298-bf35-45383b8cddd4 16475 0 2023-01-14 06:24:19 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:c5.large beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kops.k8s.io/kops-controller-pki: kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0ea715ad3f7d7c666 kubernetes.io/os:linux node-role.kubernetes.io/control-plane: node.kubernetes.io/exclude-from-external-load-balancers: node.kubernetes.io/instance-type:c5.large topology.ebs.csi.aws.com/zone:eu-west-3a topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] 
map[alpha.kubernetes.io/provided-node-ip:172.20.43.108 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0ea715ad3f7d7c666"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2023-01-14 06:24:19 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {protokube Update v1 2023-01-14 06:24:47 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kops.k8s.io/kops-controller-pki":{},"f:node-role.kubernetes.io/control-plane":{},"f:node.kubernetes.io/exclude-from-external-load-balancers":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:25:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.0.0/24\"":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:taints":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kubelet Update v1 2023-01-14 06:41:39 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.0.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0ea715ad3f7d7c666,Unschedulable:false,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/control-plane,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[100.96.0.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3892301824 0} {<nil>} 3801076Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3787444224 0} {<nil>} 3698676Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:39 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:39 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:39 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 
UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:41:39 +0000 UTC,LastTransitionTime:2023-01-14 06:25:06 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.43.108,},NodeAddress{Type:ExternalIP,Address:13.37.224.194,},NodeAddress{Type:InternalDNS,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-37-224-194.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec29ac40ecac712560f472ac147406f5,SystemUUID:ec29ac40-ecac-7125-60f4-72ac147406f5,BootID:6aefaddb-a8fb-42ca-b933-086be838242c,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/etcdadm/etcd-manager@sha256:66a453db625abb268f4b3bbefc5a34a171d81e6e8796cecca54cfd71775c77c4 registry.k8s.io/etcdadm/etcd-manager:v3.0.20221209],SizeBytes:231502799,},ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 
quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.26.0],SizeBytes:135162323,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.26.0],SizeBytes:124991801,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.26.0],SizeBytes:57657656,},ContainerImage{Names:[registry.k8s.io/kops/kops-controller:1.27.0-alpha.1],SizeBytes:43455400,},ContainerImage{Names:[registry.k8s.io/kops/dns-controller:1.27.0-alpha.1],SizeBytes:42802033,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[quay.io/cilium/operator@sha256:a6d24a006a6b92967ac90786b49bc1ac26e5477cf028cd1186efcfc2466484db quay.io/cilium/operator:v1.12.5],SizeBytes:26802430,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119 registry.k8s.io/sig-storage/csi-provisioner:v3.1.0],SizeBytes:23345856,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4 registry.k8s.io/sig-storage/csi-resizer:v1.4.0],SizeBytes:22381475,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b registry.k8s.io/sig-storage/csi-attacher:v3.4.0],SizeBytes:22085298,},ContainerImage{Names:[registry.k8s.io/provider-aws/cloud-controller-manager@sha256:fdeb61e3e42ecd9cca868d550ebdb88dd6341d9e91fcfa9a37e227dab2ad22cb registry.k8s.io/provider-aws/cloud-controller-manager:v1.26.0],SizeBytes:20154862,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/kops/kube-apiserver-healthcheck:1.27.0-alpha.1],SizeBytes:4967345,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:43:36.512: INFO: Logging kubelet events for node i-0ea715ad3f7d7c666 Jan 14 06:43:36.622: INFO: Logging pods the kubelet thinks is on node i-0ea715ad3f7d7c666 Jan 14 06:43:36.731: INFO: etcd-manager-events-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:36.731: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:43:36.731: INFO: kops-controller-8ntms started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:36.731: INFO: Container kops-controller ready: true, restart count 0 Jan 14 06:43:36.731: INFO: aws-cloud-controller-manager-8g49k started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:36.731: INFO: Container aws-cloud-controller-manager ready: true, restart count 0 Jan 14 06:43:36.731: INFO: cilium-operator-5dd44dc49f-hdhf7 started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:36.731: INFO: Container cilium-operator ready: true, restart count 0 Jan 14 06:43:36.731: INFO: ebs-csi-controller-5bd98b456f-zxg2l started at 2023-01-14 06:24:57 +0000 UTC (0+5 container statuses recorded) Jan 14 06:43:36.731: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:43:36.731: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:43:36.731: INFO: 
Container csi-resizer ready: true, restart count 0 Jan 14 06:43:36.731: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:36.731: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:36.731: INFO: etcd-manager-main-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:36.731: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:43:36.731: INFO: kube-apiserver-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+2 container statuses recorded) Jan 14 06:43:36.731: INFO: Container healthcheck ready: true, restart count 0 Jan 14 06:43:36.731: INFO: Container kube-apiserver ready: true, restart count 1 Jan 14 06:43:36.731: INFO: kube-controller-manager-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:36.731: INFO: Container kube-controller-manager ready: true, restart count 2 Jan 14 06:43:36.731: INFO: kube-scheduler-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:36.731: INFO: Container kube-scheduler ready: true, restart count 0 Jan 14 06:43:36.731: INFO: cilium-vl5tq started at 2023-01-14 06:24:57 +0000 UTC (1+1 container statuses recorded) Jan 14 06:43:36.731: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:43:36.731: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:36.731: INFO: ebs-csi-node-knngk started at 2023-01-14 06:24:57 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:36.731: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:36.731: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:36.731: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:36.731: INFO: dns-controller-69987775c6-66b5p started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:36.731: INFO: Container 
dns-controller ready: true, restart count 0 Jan 14 06:43:37.101: INFO: Latency metrics for node i-0ea715ad3f7d7c666 [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 �[1mSTEP:�[0m Destroying namespace "statefulset-6824" for this suite. �[38;5;243m01/14/23 06:43:37.101�[0m
Filter through log files | View test history on testgrid
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\sclient\sUpdate\sDemo\sshould\screate\sand\sstop\sa\sreplication\scontroller\s\s\[Conformance\]$'
test/e2e/kubectl/kubectl.go:2431 k8s.io/kubernetes/test/e2e/kubectl.validateController({0x801e128, 0xc001bd7380}, {0xc0008b92f0?, 0x0?}, 0x2, {0x75cf5a1, 0xb}, {0x75e78bb, 0x10}, 0xc003747740, ...) test/e2e/kubectl/kubectl.go:2431 +0x49d k8s.io/kubernetes/test/e2e/kubectl.glob..func1.6.2() test/e2e/kubectl/kubectl.go:344 +0x1ecfrom junit_01.xml
[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 �[1mSTEP:�[0m Creating a kubernetes client �[38;5;243m01/14/23 06:38:45.122�[0m Jan 14 06:38:45.122: INFO: >>> kubeConfig: /root/.kube/config �[1mSTEP:�[0m Building a namespace api object, basename kubectl �[38;5;243m01/14/23 06:38:45.123�[0m �[1mSTEP:�[0m Waiting for a default service account to be provisioned in namespace �[38;5;243m01/14/23 06:38:45.437�[0m �[1mSTEP:�[0m Waiting for kube-root-ca.crt to be provisioned in namespace �[38;5;243m01/14/23 06:38:45.644�[0m [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-cli] Kubectl client test/e2e/kubectl/kubectl.go:274 [BeforeEach] Update Demo test/e2e/kubectl/kubectl.go:326 [It] should create and stop a replication controller [Conformance] test/e2e/kubectl/kubectl.go:339 �[1mSTEP:�[0m creating a replication controller �[38;5;243m01/14/23 06:38:45.851�[0m Jan 14 06:38:45.851: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 create -f -' Jan 14 06:38:46.532: INFO: stderr: "" Jan 14 06:38:46.532: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" �[1mSTEP:�[0m waiting for all containers in name=update-demo pods to come up. 
�[38;5;243m01/14/23 06:38:46.532�[0m Jan 14 06:38:46.532: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:38:47.063: INFO: stderr: "" Jan 14 06:38:47.063: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:38:47.063: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:38:47.491: INFO: stderr: "" Jan 14 06:38:47.491: INFO: stdout: "" Jan 14 06:38:47.491: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:38:52.492: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:38:53.008: INFO: stderr: "" Jan 14 06:38:53.008: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:38:53.008: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:38:53.419: INFO: stderr: "" Jan 14 06:38:53.419: INFO: stdout: "" Jan 14 06:38:53.419: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:38:58.420: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:38:58.942: INFO: stderr: "" Jan 14 06:38:58.942: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:38:58.942: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:38:59.340: INFO: stderr: "" Jan 14 06:38:59.340: INFO: stdout: "" Jan 14 06:38:59.340: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:04.341: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:04.847: INFO: stderr: "" Jan 14 06:39:04.847: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:04.847: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:05.254: INFO: stderr: "" Jan 14 06:39:05.254: INFO: stdout: "" Jan 14 06:39:05.254: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:10.254: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:10.760: INFO: stderr: "" Jan 14 06:39:10.760: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:10.760: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:11.163: INFO: stderr: "" Jan 14 06:39:11.163: INFO: stdout: "" Jan 14 06:39:11.163: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:16.163: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:16.679: INFO: stderr: "" Jan 14 06:39:16.679: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:16.679: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:17.076: INFO: stderr: "" Jan 14 06:39:17.076: INFO: stdout: "" Jan 14 06:39:17.076: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:22.077: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:22.593: INFO: stderr: "" Jan 14 06:39:22.593: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:22.593: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:22.995: INFO: stderr: "" Jan 14 06:39:22.995: INFO: stdout: "" Jan 14 06:39:22.995: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:27.996: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:28.507: INFO: stderr: "" Jan 14 06:39:28.507: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:28.507: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:28.912: INFO: stderr: "" Jan 14 06:39:28.912: INFO: stdout: "" Jan 14 06:39:28.912: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:33.913: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:34.431: INFO: stderr: "" Jan 14 06:39:34.431: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:34.431: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:34.828: INFO: stderr: "" Jan 14 06:39:34.828: INFO: stdout: "" Jan 14 06:39:34.828: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:39.829: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:40.331: INFO: stderr: "" Jan 14 06:39:40.331: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:40.331: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:40.727: INFO: stderr: "" Jan 14 06:39:40.727: INFO: stdout: "" Jan 14 06:39:40.727: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:45.728: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:46.226: INFO: stderr: "" Jan 14 06:39:46.226: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:46.226: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:46.648: INFO: stderr: "" Jan 14 06:39:46.648: INFO: stdout: "" Jan 14 06:39:46.648: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:51.648: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:52.181: INFO: stderr: "" Jan 14 06:39:52.181: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:52.181: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:52.603: INFO: stderr: "" Jan 14 06:39:52.603: INFO: stdout: "" Jan 14 06:39:52.603: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:39:57.604: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:39:58.134: INFO: stderr: "" Jan 14 06:39:58.134: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:39:58.134: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:39:58.552: INFO: stderr: "" Jan 14 06:39:58.552: INFO: stdout: "" Jan 14 06:39:58.552: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:03.553: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:04.071: INFO: stderr: "" Jan 14 06:40:04.071: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:04.071: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:04.495: INFO: stderr: "" Jan 14 06:40:04.495: INFO: stdout: "" Jan 14 06:40:04.495: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:09.496: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:10.031: INFO: stderr: "" Jan 14 06:40:10.031: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:10.031: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:10.449: INFO: stderr: "" Jan 14 06:40:10.449: INFO: stdout: "" Jan 14 06:40:10.449: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:15.449: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:15.979: INFO: stderr: "" Jan 14 06:40:15.979: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:15.979: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:16.379: INFO: stderr: "" Jan 14 06:40:16.379: INFO: stdout: "" Jan 14 06:40:16.379: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:21.379: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:21.915: INFO: stderr: "" Jan 14 06:40:21.915: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:21.915: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:22.349: INFO: stderr: "" Jan 14 06:40:22.349: INFO: stdout: "" Jan 14 06:40:22.349: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:27.350: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:27.882: INFO: stderr: "" Jan 14 06:40:27.882: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:27.882: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:28.283: INFO: stderr: "" Jan 14 06:40:28.283: INFO: stdout: "" Jan 14 06:40:28.283: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:33.283: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:33.796: INFO: stderr: "" Jan 14 06:40:33.796: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:33.796: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:34.194: INFO: stderr: "" Jan 14 06:40:34.194: INFO: stdout: "" Jan 14 06:40:34.194: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:39.195: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:39.698: INFO: stderr: "" Jan 14 06:40:39.698: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:39.698: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:40.108: INFO: stderr: "" Jan 14 06:40:40.108: INFO: stdout: "" Jan 14 06:40:40.108: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:45.109: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:45.619: INFO: stderr: "" Jan 14 06:40:45.619: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:45.619: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:46.037: INFO: stderr: "" Jan 14 06:40:46.037: INFO: stdout: "" Jan 14 06:40:46.037: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:51.038: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:51.546: INFO: stderr: "" Jan 14 06:40:51.546: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:51.546: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:51.945: INFO: stderr: "" Jan 14 06:40:51.945: INFO: stdout: "" Jan 14 06:40:51.945: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:40:56.946: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:40:57.458: INFO: stderr: "" Jan 14 06:40:57.458: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:40:57.458: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:40:57.888: INFO: stderr: "" Jan 14 06:40:57.888: INFO: stdout: "" Jan 14 06:40:57.888: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:41:02.888: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:03.409: INFO: stderr: "" Jan 14 06:41:03.409: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:03.409: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:03.808: INFO: stderr: "" Jan 14 06:41:03.808: INFO: stdout: "" Jan 14 06:41:03.808: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:41:08.809: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:09.308: INFO: stderr: "" Jan 14 06:41:09.308: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:09.308: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:09.711: INFO: stderr: "" Jan 14 06:41:09.711: INFO: stdout: "" Jan 14 06:41:09.711: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:41:14.712: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:15.223: INFO: stderr: "" Jan 14 06:41:15.223: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:15.223: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:15.652: INFO: stderr: "" Jan 14 06:41:15.652: INFO: stdout: "" Jan 14 06:41:15.652: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:41:20.653: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:21.175: INFO: stderr: "" Jan 14 06:41:21.175: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:21.175: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:21.571: INFO: stderr: "" Jan 14 06:41:21.571: INFO: stdout: "" Jan 14 06:41:21.571: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:41:26.571: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:27.078: INFO: stderr: "" Jan 14 06:41:27.078: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:27.078: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:27.488: INFO: stderr: "" Jan 14 06:41:27.488: INFO: stdout: "" Jan 14 06:41:27.488: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:41:32.489: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:33.011: INFO: stderr: "" Jan 14 06:41:33.011: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:33.011: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:33.432: INFO: stderr: "" Jan 14 06:41:33.432: INFO: stdout: "" Jan 14 06:41:33.432: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:41:38.432: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:38.945: INFO: stderr: "" Jan 14 06:41:38.945: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:38.945: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:39.358: INFO: stderr: "" Jan 14 06:41:39.358: INFO: stdout: "" Jan 14 06:41:39.358: INFO: update-demo-nautilus-mrn8r is created but not running Jan 14 06:41:44.359: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:44.887: INFO: stderr: "" Jan 14 06:41:44.887: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:44.887: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:45.285: INFO: stderr: "" Jan 14 06:41:45.285: INFO: stdout: "true" Jan 14 06:41:45.285: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:41:45.718: INFO: stderr: "" Jan 14 06:41:45.718: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:41:45.718: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:41:45.825: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:41:45.826: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:41:45.826: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:41:45.826: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:46.234: INFO: stderr: "" Jan 14 06:41:46.234: INFO: stdout: "" Jan 14 06:41:46.234: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:41:51.235: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:51.747: INFO: stderr: "" Jan 14 06:41:51.748: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:51.748: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if 
(exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:52.184: INFO: stderr: "" Jan 14 06:41:52.184: INFO: stdout: "true" Jan 14 06:41:52.184: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:41:52.597: INFO: stderr: "" Jan 14 06:41:52.597: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:41:52.597: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:41:52.703: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:41:52.703: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:41:52.703: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:41:52.703: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:53.116: INFO: stderr: "" Jan 14 06:41:53.116: INFO: stdout: "" Jan 14 06:41:53.116: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:41:58.116: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:41:58.622: INFO: stderr: "" Jan 14 06:41:58.622: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:41:58.622: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:59.024: INFO: stderr: "" Jan 14 06:41:59.024: INFO: stdout: "true" Jan 14 06:41:59.024: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:41:59.443: INFO: stderr: "" Jan 14 06:41:59.443: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:41:59.443: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:41:59.549: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:41:59.549: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:41:59.549: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:41:59.549: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:41:59.948: INFO: stderr: "" Jan 14 06:41:59.948: INFO: stdout: "" Jan 14 06:41:59.948: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:42:04.949: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:42:05.465: INFO: stderr: "" Jan 14 06:42:05.465: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:42:05.465: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if 
(exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:05.862: INFO: stderr: "" Jan 14 06:42:05.862: INFO: stdout: "true" Jan 14 06:42:05.862: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:42:06.284: INFO: stderr: "" Jan 14 06:42:06.284: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:42:06.284: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:42:06.389: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:42:06.389: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:42:06.389: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:42:06.389: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:06.794: INFO: stderr: "" Jan 14 06:42:06.794: INFO: stdout: "" Jan 14 06:42:06.794: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:42:11.795: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:42:12.301: INFO: stderr: "" Jan 14 06:42:12.301: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:42:12.301: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:12.694: INFO: stderr: "" Jan 14 06:42:12.694: INFO: stdout: "true" Jan 14 06:42:12.694: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:42:13.117: INFO: stderr: "" Jan 14 06:42:13.117: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:42:13.117: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:42:13.222: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:42:13.222: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:42:13.222: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:42:13.222: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:13.622: INFO: stderr: "" Jan 14 06:42:13.622: INFO: stdout: "" Jan 14 06:42:13.622: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:42:18.622: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:42:19.128: INFO: stderr: "" Jan 14 06:42:19.128: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:42:19.128: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if 
(exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:19.533: INFO: stderr: "" Jan 14 06:42:19.533: INFO: stdout: "true" Jan 14 06:42:19.534: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:42:19.936: INFO: stderr: "" Jan 14 06:42:19.936: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:42:19.936: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:42:20.042: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:42:20.043: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:42:20.043: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:42:20.043: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:20.437: INFO: stderr: "" Jan 14 06:42:20.437: INFO: stdout: "" Jan 14 06:42:20.437: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:42:25.437: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:42:25.942: INFO: stderr: "" Jan 14 06:42:25.942: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:42:25.942: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:26.349: INFO: stderr: "" Jan 14 06:42:26.349: INFO: stdout: "true" Jan 14 06:42:26.349: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:42:26.748: INFO: stderr: "" Jan 14 06:42:26.748: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:42:26.748: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:42:26.854: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:42:26.854: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:42:26.854: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:42:26.854: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:27.248: INFO: stderr: "" Jan 14 06:42:27.248: INFO: stdout: "" Jan 14 06:42:27.248: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:42:32.249: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:42:32.762: INFO: stderr: "" Jan 14 06:42:32.762: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:42:32.762: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if 
(exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:33.168: INFO: stderr: "" Jan 14 06:42:33.168: INFO: stdout: "true" Jan 14 06:42:33.168: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:42:33.600: INFO: stderr: "" Jan 14 06:42:33.600: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:42:33.600: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:42:33.706: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:42:33.706: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:42:33.706: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:42:33.706: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:34.117: INFO: stderr: "" Jan 14 06:42:34.117: INFO: stdout: "" Jan 14 06:42:34.117: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:42:39.117: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:42:39.642: INFO: stderr: "" Jan 14 06:42:39.642: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:42:39.642: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:40.056: INFO: stderr: "" Jan 14 06:42:40.056: INFO: stdout: "true" Jan 14 06:42:40.056: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:42:40.472: INFO: stderr: "" Jan 14 06:42:40.472: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:42:40.472: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:42:40.578: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:42:40.578: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:42:40.578: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:42:40.578: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:40.974: INFO: stderr: "" Jan 14 06:42:40.974: INFO: stdout: "" Jan 14 06:42:40.974: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:42:45.974: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:42:46.487: INFO: stderr: "" Jan 14 06:42:46.487: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:42:46.487: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if 
(exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:46.920: INFO: stderr: "" Jan 14 06:42:46.920: INFO: stdout: "true" Jan 14 06:42:46.920: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:42:47.322: INFO: stderr: "" Jan 14 06:42:47.322: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:42:47.322: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:42:47.428: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:42:47.428: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:42:47.428: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:42:47.428: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:47.832: INFO: stderr: "" Jan 14 06:42:47.832: INFO: stdout: "" Jan 14 06:42:47.832: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:42:52.833: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:42:53.345: INFO: stderr: "" Jan 14 06:42:53.345: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:42:53.345: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:53.754: INFO: stderr: "" Jan 14 06:42:53.754: INFO: stdout: "true" Jan 14 06:42:53.754: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:42:54.189: INFO: stderr: "" Jan 14 06:42:54.189: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:42:54.190: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:42:54.295: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:42:54.295: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:42:54.295: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:42:54.295: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:42:54.706: INFO: stderr: "" Jan 14 06:42:54.707: INFO: stdout: "" Jan 14 06:42:54.707: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:42:59.707: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:43:00.217: INFO: stderr: "" Jan 14 06:43:00.217: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:43:00.217: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if 
(exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:00.633: INFO: stderr: "" Jan 14 06:43:00.633: INFO: stdout: "true" Jan 14 06:43:00.633: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:43:01.058: INFO: stderr: "" Jan 14 06:43:01.058: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:43:01.058: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:43:01.163: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:43:01.163: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:43:01.163: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:43:01.163: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:01.565: INFO: stderr: "" Jan 14 06:43:01.565: INFO: stdout: "" Jan 14 06:43:01.565: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:43:06.566: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:43:07.112: INFO: stderr: "" Jan 14 06:43:07.112: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:43:07.112: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:07.539: INFO: stderr: "" Jan 14 06:43:07.539: INFO: stdout: "true" Jan 14 06:43:07.539: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:43:07.954: INFO: stderr: "" Jan 14 06:43:07.954: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:43:07.954: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:43:08.059: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:43:08.060: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:43:08.060: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:43:08.060: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:08.463: INFO: stderr: "" Jan 14 06:43:08.463: INFO: stdout: "" Jan 14 06:43:08.463: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:43:13.463: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:43:13.977: INFO: stderr: "" Jan 14 06:43:13.977: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:43:13.977: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if 
(exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:14.378: INFO: stderr: "" Jan 14 06:43:14.378: INFO: stdout: "true" Jan 14 06:43:14.378: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:43:14.779: INFO: stderr: "" Jan 14 06:43:14.779: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:43:14.779: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:43:14.885: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:43:14.885: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:43:14.885: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:43:14.885: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:15.312: INFO: stderr: "" Jan 14 06:43:15.312: INFO: stdout: "" Jan 14 06:43:15.312: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:43:20.313: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:43:20.833: INFO: stderr: "" Jan 14 06:43:20.833: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:43:20.833: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:21.231: INFO: stderr: "" Jan 14 06:43:21.231: INFO: stdout: "true" Jan 14 06:43:21.231: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:43:21.632: INFO: stderr: "" Jan 14 06:43:21.632: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:43:21.632: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:43:21.737: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:43:21.737: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:43:21.737: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:43:21.737: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:22.133: INFO: stderr: "" Jan 14 06:43:22.133: INFO: stdout: "" Jan 14 06:43:22.133: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:43:27.133: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:43:27.638: INFO: stderr: "" Jan 14 06:43:27.638: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:43:27.638: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if 
(exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:28.054: INFO: stderr: "" Jan 14 06:43:28.055: INFO: stdout: "true" Jan 14 06:43:28.055: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:43:28.460: INFO: stderr: "" Jan 14 06:43:28.460: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:43:28.460: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:43:28.566: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:43:28.566: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:43:28.566: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:43:28.566: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:28.970: INFO: stderr: "" Jan 14 06:43:28.970: INFO: stdout: "" Jan 14 06:43:28.970: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:43:33.971: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:43:34.486: INFO: stderr: "" Jan 14 06:43:34.486: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:43:34.486: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:34.901: INFO: stderr: "" Jan 14 06:43:34.901: INFO: stdout: "true" Jan 14 06:43:34.901: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:43:35.296: INFO: stderr: "" Jan 14 06:43:35.296: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:43:35.296: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:43:35.402: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:43:35.402: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:43:35.402: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:43:35.402: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:35.800: INFO: stderr: "" Jan 14 06:43:35.800: INFO: stdout: "" Jan 14 06:43:35.800: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:43:40.802: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' Jan 14 06:43:41.316: INFO: stderr: "" Jan 14 06:43:41.316: INFO: stdout: "update-demo-nautilus-mrn8r update-demo-nautilus-wzc78 " Jan 14 06:43:41.316: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if 
(exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:41.727: INFO: stderr: "" Jan 14 06:43:41.727: INFO: stdout: "true" Jan 14 06:43:41.727: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-mrn8r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' Jan 14 06:43:42.122: INFO: stderr: "" Jan 14 06:43:42.122: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:43:42.122: INFO: validating pod update-demo-nautilus-mrn8r Jan 14 06:43:42.228: INFO: got data: { "image": "nautilus.jpg" } Jan 14 06:43:42.228: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . Jan 14 06:43:42.228: INFO: update-demo-nautilus-mrn8r is verified up and running Jan 14 06:43:42.228: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods update-demo-nautilus-wzc78 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' Jan 14 06:43:42.631: INFO: stderr: "" Jan 14 06:43:42.631: INFO: stdout: "" Jan 14 06:43:42.631: INFO: update-demo-nautilus-wzc78 is created but not running Jan 14 06:43:47.631: FAIL: Timed out after 300 seconds waiting for name=update-demo pods to reach valid state Full Stack Trace k8s.io/kubernetes/test/e2e/kubectl.validateController({0x801e128, 0xc001bd7380}, {0xc0008b92f0?, 0x0?}, 0x2, {0x75cf5a1, 0xb}, {0x75e78bb, 0x10}, 0xc003747740, ...) test/e2e/kubectl/kubectl.go:2431 +0x49d k8s.io/kubernetes/test/e2e/kubectl.glob..func1.6.2() test/e2e/kubectl/kubectl.go:344 +0x1ec �[1mSTEP:�[0m using delete to clean up resources �[38;5;243m01/14/23 06:43:47.632�[0m Jan 14 06:43:47.632: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 delete --grace-period=0 --force -f -' Jan 14 06:43:48.149: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" Jan 14 06:43:48.149: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" Jan 14 06:43:48.149: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get rc,svc -l name=update-demo --no-headers' Jan 14 06:43:48.663: INFO: stderr: "No resources found in kubectl-4613 namespace.\n" Jan 14 06:43:48.663: INFO: stdout: "" Jan 14 06:43:48.663: INFO: Running '/home/prow/go/src/k8s.io/kops/_rundir/6a994572-93d3-11ed-bd5d-72eebb4d772a/kubectl --server=https://api.e2e-e2e-kops-grid-cilium-eni-u2004-k26.test-cncf-aws.k8s.io --kubeconfig=/root/.kube/config --namespace=kubectl-4613 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' Jan 14 06:43:49.062: INFO: stderr: "" Jan 14 06:43:49.062: INFO: stdout: "" [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 Jan 14 06:43:49.062: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 �[1mSTEP:�[0m dump namespace information after failure �[38;5;243m01/14/23 06:43:49.169�[0m �[1mSTEP:�[0m Collecting events from namespace "kubectl-4613". �[38;5;243m01/14/23 06:43:49.169�[0m �[1mSTEP:�[0m Found 29 events. 
�[38;5;243m01/14/23 06:43:49.274�[0m Jan 14 06:43:49.274: INFO: At 2023-01-14 06:38:46 +0000 UTC - event for update-demo-nautilus: {replication-controller } SuccessfulCreate: Created pod: update-demo-nautilus-wzc78 Jan 14 06:43:49.274: INFO: At 2023-01-14 06:38:46 +0000 UTC - event for update-demo-nautilus: {replication-controller } SuccessfulCreate: Created pod: update-demo-nautilus-mrn8r Jan 14 06:43:49.274: INFO: At 2023-01-14 06:38:46 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e11ce6b194ede5262988539ccc55895e762a91817f16e3ab055b3b7b70424c7a": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:38:46 +0000 UTC - event for update-demo-nautilus-mrn8r: {default-scheduler } Scheduled: Successfully assigned kubectl-4613/update-demo-nautilus-mrn8r to i-0526f6963633e8375 Jan 14 06:43:49.274: INFO: At 2023-01-14 06:38:46 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "38b99e093abbb7ed5b990af4c15ffaeb58e181dbe021490c8b24f6a97b87c620": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:38:46 +0000 UTC - event for update-demo-nautilus-wzc78: {default-scheduler } Scheduled: Successfully assigned kubectl-4613/update-demo-nautilus-wzc78 to i-06bd219a44e00580c Jan 14 06:43:49.274: INFO: At 2023-01-14 06:38:58 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = 
failed to setup network for sandbox "e7fcc5482629abebf6221fe827c318b34feea4d78a8d919f2a16bd0942ee5b57": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:39:01 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8accdb99819ca962ff8a01822cde3e1f050c20a4a837ca8f3193f5ac9be0e9a2": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:39:11 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4d82f3f9773284ed850550bfbdf8f7ab850aeb09abb9a703c2ca9fef5f633a5d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:39:13 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4785dd6ea59690a407b6ea069ff8fdb01523c9bfc664017bf0920f39fc6a7f02": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:39:23 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox 
"77bbb1a5c32d3996be3280093e0013a2c36362a3443a420d095f923f235b75eb": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:39:27 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5f26a17cecfba45c2b463fb0e69c418588d0c3f88fa69882c61069805655809a": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:39:37 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "64d1cc0c8e35b40f114728c593ed6c6e42ed16e869a5da7a485523f4fca34b23": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:39:41 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "dfabcba62dccae554003567dc47719471762c48441e79765c76239366484b2cf": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:39:51 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "272f6d6b87ed842c78913b6d578ada18024271afa48ae48b708cece549ebac2b": plugin type="cilium-cni" 
name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:39:53 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f8906fc53938e15a56ef4125fd5d2e5a9ca8c62cba9090bf8bda01bd691df467": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:40:04 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "9c781e223db89f7fbc4c5401a9e8f595ab348213441aefc8b45e2b9b67647713": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:40:05 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "adfbd5c5e04c94dae7d68ac4c457f3725d480fc769e718e3e9212dda45bef2b4": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:40:17 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "6c7a1455969bf65e94d99e8f48068d17166b84829ab1615c6bc08da744631a65": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] 
postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:40:19 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c860d3e5c6bfb079cfceba3f2fccb5bc9acaf40d1c9d65620dca485b9e835494": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:40:29 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "1665ba179ffac9f66705cd5b2ba037f1ca57ad2c4f83781e5a64ded152cf7a20": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:40:32 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5c8a0acb59765ebf545641ec06e2ec5f17865d0ca9608d5b32cecc653bd6d92d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:40:40 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7ff17083ddabd0609ddff3ffbe6af06f27d0a5715bb81f1fa02986354e5fd87e": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: 
INFO: At 2023-01-14 06:40:45 +0000 UTC - event for update-demo-nautilus-wzc78: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "aaaf7b4739512725373818d1db2fec4eb43975216ac60d03bb98cbaed5d30b50": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:43:49.274: INFO: At 2023-01-14 06:41:33 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} Pulling: Pulling image "registry.k8s.io/e2e-test-images/nautilus:1.7" Jan 14 06:43:49.274: INFO: At 2023-01-14 06:41:39 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} Started: Started container update-demo Jan 14 06:43:49.274: INFO: At 2023-01-14 06:41:39 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} Created: Created container update-demo Jan 14 06:43:49.274: INFO: At 2023-01-14 06:41:39 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} Pulled: Successfully pulled image "registry.k8s.io/e2e-test-images/nautilus:1.7" in 5.303497371s (5.303503668s including waiting) Jan 14 06:43:49.274: INFO: At 2023-01-14 06:43:48 +0000 UTC - event for update-demo-nautilus-mrn8r: {kubelet i-0526f6963633e8375} Killing: Stopping container update-demo Jan 14 06:43:49.379: INFO: POD NODE PHASE GRACE CONDITIONS Jan 14 06:43:49.379: INFO: Jan 14 06:43:49.485: INFO: Logging node info for node i-0526f6963633e8375 Jan 14 06:43:49.590: INFO: Node Info: &Node{ObjectMeta:{i-0526f6963633e8375 b8bbb07c-e234-4117-968a-d4f54d957b46 16511 0 2023-01-14 06:26:11 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a 
io.kubernetes.storage.mock/node:some-mock-node kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0526f6963633e8375 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0526f6963633e8375 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.59.50 csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-4581":"i-0526f6963633e8375","ebs.csi.aws.com":"i-0526f6963633e8375"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.4.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:39:21 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:41:47 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:io.kubernetes.storage.mock/node":{},"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.4.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0526f6963633e8375,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.4.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:47 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:47 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:47 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:41:47 +0000 UTC,LastTransitionTime:2023-01-14 06:26:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.59.50,},NodeAddress{Type:ExternalIP,Address:13.38.88.176,},NodeAddress{Type:InternalDNS,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-88-176.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec24f53d700a2e9399be7e5e2cc1e943,SystemUUID:ec24f53d-700a-2e93-99be-7e5e2cc1e943,BootID:58d231c1-9ab3-4e54-9948-319cfad92d73,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 
quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nautilus@sha256:80ba6c8c44f9623f06e868a1aa66026c8ec438ad814f9ec95e9333b415fe3550 registry.k8s.io/e2e-test-images/nautilus:1.7],SizeBytes:49641698,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[gcr.io/authenticated-image-pulling/alpine@sha256:7ff177862cb50c602bfe81f805969412e619c054a2bbead977d0c276988aa4a0 gcr.io/authenticated-image-pulling/alpine:3.7],SizeBytes:2110879,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db 
registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4581^2cadbc7d-93d6-11ed-858d-76d972479176],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4581^2cadbc7d-93d6-11ed-858d-76d972479176,DevicePath:,},},Config:nil,},} Jan 14 06:43:49.590: INFO: Logging kubelet events for node i-0526f6963633e8375 Jan 14 06:43:49.699: INFO: Logging pods the kubelet thinks is on node i-0526f6963633e8375 Jan 14 06:43:49.809: INFO: cilium-tv25q started at 2023-01-14 06:26:12 +0000 UTC (1+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:43:49.809: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:49.809: INFO: pod-projected-configmaps-bb73475c-05a7-4e4e-8b32-9886f2febf29 started at 2023-01-14 06:39:21 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container agnhost-container ready: false, restart count 0 Jan 14 06:43:49.809: INFO: bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb started at 2023-01-14 06:38:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb ready: false, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-v4q7f started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-l2cw9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-p4cww started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: 
Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ts46q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6xkx2 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-kvr9w started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: false, restart count 0 Jan 14 06:43:49.809: INFO: suspend-false-to-true-cbsf6 started at 2023-01-14 06:39:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container c ready: false, restart count 0 Jan 14 06:43:49.809: INFO: csi-mockplugin-0 started at 2023-01-14 06:33:36 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:49.809: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:43:49.809: INFO: Container driver-registrar ready: true, restart count 0 Jan 14 06:43:49.809: INFO: Container mock ready: true, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-crc8k started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:49.809: INFO: pvc-volume-tester-lrm5k started at 2023-01-14 06:39:20 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container volume-tester ready: false, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-66vns 
started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:49.809: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:33:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8z92 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:49.809: INFO: ebs-csi-node-r8qfk started at 2023-01-14 06:26:12 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:49.809: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:49.809: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:49.809: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:49.809: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-mh56b started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:49.809: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.204: INFO: Latency metrics for node i-0526f6963633e8375 Jan 14 06:43:50.204: INFO: Logging node info for node i-06bd219a44e00580c Jan 14 06:43:50.309: INFO: Node Info: &Node{ObjectMeta:{i-06bd219a44e00580c c2a57daf-87e6-4c31-ab8d-158cf1752c85 16062 0 2023-01-14 06:26:09 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-06bd219a44e00580c kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium 
topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-06bd219a44e00580c topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.61.252 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-06bd219a44e00580c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.3.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } 
{kube-controller-manager Update v1 2023-01-14 06:35:28 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:40:38 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.3.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-06bd219a44e00580c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.3.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk 
pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:21 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.61.252,},NodeAddress{Type:ExternalIP,Address:15.237.110.205,},NodeAddress{Type:InternalDNS,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-110-205.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec28b615c9f51208890a610e546cafd1,SystemUUID:ec28b615-c9f5-1208-890a-610e546cafd1,BootID:9cfe2407-336f-468c-b599-1b87cbc71140,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e 
registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847 kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4 kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4,DevicePath:,},},Config:nil,},} Jan 14 06:43:50.310: INFO: Logging kubelet events for node i-06bd219a44e00580c Jan 14 06:43:50.417: INFO: Logging pods the kubelet thinks is on node i-06bd219a44e00580c Jan 14 06:43:50.535: INFO: pod-disruption-failure-ignore-0-dbk78 started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container c ready: true, restart count 0 Jan 14 06:43:50.535: INFO: hostexec-i-06bd219a44e00580c-vkgh8 started at 2023-01-14 06:39:10 +0000 UTC (0+1 container 
statuses recorded) Jan 14 06:43:50.535: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:43:50.535: INFO: pod-e030d7c2-b127-467e-8064-fd944a65c42f started at 2023-01-14 06:40:51 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container write-pod ready: false, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dbmt8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ztghd started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: pod-disruption-failure-ignore-1-f5m4k started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container c ready: false, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rlwrq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: pod-db681d65-3e7a-406a-aa51-b57c4ce4869e started at 2023-01-14 06:39:09 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container test-container ready: false, restart count 0 Jan 14 06:43:50.535: INFO: pod-exec-websocket-7f4605b9-4d8c-489c-860b-2b70c44af97b started at 2023-01-14 06:41:15 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container main ready: false, restart count 0 Jan 14 06:43:50.535: INFO: busybox-a5ef1bf2-f510-44d1-a72c-3211b0bd56f9 started at 2023-01-14 06:41:33 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container busybox ready: false, 
restart count 0 Jan 14 06:43:50.535: INFO: hostexec started at 2023-01-14 06:43:45 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:43:50.535: INFO: pod-disruption-failure-ignore-1-xjff6 started at 2023-01-14 06:34:27 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container c ready: false, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6nwrq started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: pod-disruption-failure-ignore-1-6gwgt started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container c ready: false, restart count 0 Jan 14 06:43:50.535: INFO: pod-disruption-failure-ignore-1-ghlhh started at 2023-01-14 06:34:12 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container c ready: false, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zbhxz started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:13 +0000 UTC (0+7 container statuses recorded) Jan 14 06:43:50.535: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:43:50.535: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:43:50.535: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:43:50.535: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:43:50.535: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:43:50.535: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 
06:43:50.535: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:43:50.535: INFO: httpd started at 2023-01-14 06:43:38 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container httpd ready: false, restart count 0 Jan 14 06:43:50.535: INFO: cilium-k6c6s started at 2023-01-14 06:26:10 +0000 UTC (1+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:43:50.535: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:50.535: INFO: downwardapi-volume-75c6efde-4aed-47bb-8563-2d685d442490 started at 2023-01-14 06:39:23 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container client-container ready: false, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q2x2n started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: pod-disruption-failure-ignore-1-crpgt started at 2023-01-14 06:34:01 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container c ready: false, restart count 0 Jan 14 06:43:50.535: INFO: pod-edd099ae-7d89-4db8-9a86-b238ae68aba9 started at 2023-01-14 06:39:15 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container write-pod ready: true, restart count 0 Jan 14 06:43:50.535: INFO: ebs-csi-node-62qzb started at 2023-01-14 06:26:10 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:50.535: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:50.535: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:50.535: INFO: liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 started at 2023-01-14 06:39:09 +0000 UTC (0+1 container statuses recorded) 
Jan 14 06:43:50.535: INFO: Container agnhost-container ready: false, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k776q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-gsthr started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: inline-volume-tester-v5nnb started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:43:50.535: INFO: pod-disruption-failure-ignore-1-5xwhv started at 2023-01-14 06:34:05 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container c ready: false, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-cf86c started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:50.535: INFO: inline-volume-tester-2fmpv started at 2023-01-14 06:34:58 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:43:50.535: INFO: suspend-false-to-true-w87tb started at 2023-01-14 06:39:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container c ready: false, restart count 0 Jan 14 06:43:50.535: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-jzs4v started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 
06:43:50.535: INFO: pod-configmaps-fbe406e3-6009-41bb-911b-acb91067c9bd started at 2023-01-14 06:41:33 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container agnhost-container ready: false, restart count 0 Jan 14 06:43:50.535: INFO: pod-disruption-failure-ignore-1-frbqx started at 2023-01-14 06:34:08 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container c ready: false, restart count 0 Jan 14 06:43:50.535: INFO: test-ss-0 started at 2023-01-14 06:34:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:50.535: INFO: Container webserver ready: false, restart count 0 Jan 14 06:43:50.535: INFO: dns-test-1b49684b-e070-4d75-b586-93f0f22c501e started at 2023-01-14 06:39:25 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:50.535: INFO: Container jessie-querier ready: false, restart count 0 Jan 14 06:43:50.535: INFO: Container querier ready: false, restart count 0 Jan 14 06:43:50.535: INFO: Container webserver ready: false, restart count 0 Jan 14 06:43:50.948: INFO: Latency metrics for node i-06bd219a44e00580c Jan 14 06:43:50.948: INFO: Logging node info for node i-0930a50194a147b36 Jan 14 06:43:51.053: INFO: Node Info: &Node{ObjectMeta:{i-0930a50194a147b36 4316b3c5-1eeb-4ee2-9818-40f99d51117d 16789 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0930a50194a147b36 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0930a50194a147b36 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.36.60 
csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0930a50194a147b36"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.1.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:42:44 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.1.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0930a50194a147b36,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.1.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:42:44 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:42:44 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:42:44 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has 
sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:42:44 +0000 UTC,LastTransitionTime:2023-01-14 06:26:19 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.36.60,},NodeAddress{Type:ExternalIP,Address:15.237.49.122,},NodeAddress{Type:InternalDNS,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-49-122.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2ac5dccb44f409fdc575df19a0b9a7,SystemUUID:ec2ac5dc-cb44-f409-fdc5-75df19a0b9a7,BootID:9dff06f2-e51d-4b5e-a657-e8f546eded95,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:68d396900aeaa072c1f27289485fdac29834045a6f3ffe369bf389d830ef572d registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.6],SizeBytes:20293261,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 
registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:43:51.053: INFO: Logging kubelet events for node i-0930a50194a147b36 Jan 14 06:43:51.161: INFO: Logging pods the kubelet thinks is on node i-0930a50194a147b36 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g4xd9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: ebs-csi-node-rpzft started at 2023-01-14 
06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:51.272: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:51.272: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zjjvx started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-pkvpm started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: coredns-559769c974-5xkn6 started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container coredns ready: true, restart count 0 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g6wnp started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-slt2z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: coredns-autoscaler-7cb5c5b969-svc7j started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container autoscaler ready: true, restart count 0 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-xxqp8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q9h4x started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rfg9z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:42:07 +0000 UTC (0+7 container statuses recorded) Jan 14 06:43:51.272: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:43:51.272: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:43:51.272: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:43:51.272: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:43:51.272: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:43:51.272: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:43:51.272: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dw26q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-bb7qt started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:51.272: INFO: cilium-75rxm started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:43:51.272: INFO: Init container 
clean-cilium-state ready: true, restart count 0 Jan 14 06:43:51.272: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:51.678: INFO: Latency metrics for node i-0930a50194a147b36 Jan 14 06:43:51.678: INFO: Logging node info for node i-095cd924e787c9946 Jan 14 06:43:51.783: INFO: Node Info: &Node{ObjectMeta:{i-095cd924e787c9946 7ac98e5e-c131-42e0-a67e-ba9b45d163a4 16176 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-095cd924e787c9946 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-095cd924e787c9946 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.51.27 csi.volume.kubernetes.io/nodeid:{"csi-hostpath-ephemeral-9794":"i-095cd924e787c9946","ebs.csi.aws.com":"i-095cd924e787c9946"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.2.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:40:50 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:40:53 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.2.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-095cd924e787c9946,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.2.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: 
{{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:53 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:53 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:53 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:40:53 +0000 UTC,LastTransitionTime:2023-01-14 06:26:20 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.51.27,},NodeAddress{Type:ExternalIP,Address:13.38.27.88,},NodeAddress{Type:InternalDNS,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-27-88.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2449c051467854b20245f8e87294d1,SystemUUID:ec2449c0-5146-7854-b202-45f8e87294d1,BootID:11ed24c0-6b48-4372-960a-a4095c73f4ca,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/sample-apiserver@sha256:8d70890151aa5d096f331cb9da1b9cd5be0412b7363fe67b5c3befdcaa2a28d0 registry.k8s.io/e2e-test-images/sample-apiserver:1.17.7],SizeBytes:25667066,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-ephemeral-9794^61c31778-93d6-11ed-8af7-82bdff3b6028],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-ephemeral-9794^61c31778-93d6-11ed-8af7-82bdff3b6028,DevicePath:,},},Config:nil,},} Jan 14 06:43:51.783: INFO: Logging kubelet events for node i-095cd924e787c9946 Jan 14 06:43:51.900: INFO: Logging pods the kubelet thinks is on node i-095cd924e787c9946 Jan 14 06:43:52.011: INFO: cilium-kpqdf started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:43:52.011: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:52.011: INFO: 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-qnwnv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-94lp8 started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:43:00 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:43:52.011: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8pkw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-7bdwh started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-d9fvw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-8x9bt started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ppzfj started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: netserver-3 started at 2023-01-14 06:43:51 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container webserver ready: false, restart count 0 Jan 14 06:43:52.011: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:34 +0000 UTC (0+7 container statuses recorded) Jan 14 06:43:52.011: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:43:52.011: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:43:52.011: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 06:43:52.011: INFO: Container csi-snapshotter ready: true, restart count 0 Jan 14 06:43:52.011: INFO: Container hostpath ready: true, restart count 0 Jan 14 06:43:52.011: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:52.011: INFO: coredns-559769c974-lpb2c started at 2023-01-14 06:26:42 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container coredns ready: true, restart count 0 Jan 14 06:43:52.011: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k5vzv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: inline-volume-tester-xl4dq started at 2023-01-14 06:40:49 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:43:52.011: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-79v7d started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: ebs-csi-node-q6j9r started at 2023-01-14 06:26:07 +0000 UTC (0+3 
container statuses recorded) Jan 14 06:43:52.011: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:52.011: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:52.011: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-hghxq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.011: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:43:52.011: INFO: csi-mockplugin-0 started at 2023-01-14 06:43:00 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:52.011: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:43:52.011: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:43:52.011: INFO: Container mock ready: false, restart count 0 Jan 14 06:43:52.450: INFO: Latency metrics for node i-095cd924e787c9946 Jan 14 06:43:52.450: INFO: Logging node info for node i-0ea715ad3f7d7c666 Jan 14 06:43:52.557: INFO: Node Info: &Node{ObjectMeta:{i-0ea715ad3f7d7c666 1b9ffdb4-6e31-4298-bf35-45383b8cddd4 16475 0 2023-01-14 06:24:19 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:c5.large beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kops.k8s.io/kops-controller-pki: kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0ea715ad3f7d7c666 kubernetes.io/os:linux node-role.kubernetes.io/control-plane: node.kubernetes.io/exclude-from-external-load-balancers: node.kubernetes.io/instance-type:c5.large topology.ebs.csi.aws.com/zone:eu-west-3a topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.43.108 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0ea715ad3f7d7c666"} node.alpha.kubernetes.io/ttl:0 
volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2023-01-14 06:24:19 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {protokube Update v1 2023-01-14 06:24:47 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kops.k8s.io/kops-controller-pki":{},"f:node-role.kubernetes.io/control-plane":{},"f:node.kubernetes.io/exclude-from-external-load-balancers":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:25:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.0.0/24\"":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:taints":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kubelet Update v1 2023-01-14 06:41:39 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.0.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0ea715ad3f7d7c666,Unschedulable:false,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/control-plane,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[100.96.0.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3892301824 0} {<nil>} 3801076Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3787444224 0} {<nil>} 3698676Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:39 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:39 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:41:39 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 
UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:41:39 +0000 UTC,LastTransitionTime:2023-01-14 06:25:06 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.43.108,},NodeAddress{Type:ExternalIP,Address:13.37.224.194,},NodeAddress{Type:InternalDNS,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-37-224-194.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec29ac40ecac712560f472ac147406f5,SystemUUID:ec29ac40-ecac-7125-60f4-72ac147406f5,BootID:6aefaddb-a8fb-42ca-b933-086be838242c,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/etcdadm/etcd-manager@sha256:66a453db625abb268f4b3bbefc5a34a171d81e6e8796cecca54cfd71775c77c4 registry.k8s.io/etcdadm/etcd-manager:v3.0.20221209],SizeBytes:231502799,},ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 
quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.26.0],SizeBytes:135162323,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.26.0],SizeBytes:124991801,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.26.0],SizeBytes:57657656,},ContainerImage{Names:[registry.k8s.io/kops/kops-controller:1.27.0-alpha.1],SizeBytes:43455400,},ContainerImage{Names:[registry.k8s.io/kops/dns-controller:1.27.0-alpha.1],SizeBytes:42802033,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[quay.io/cilium/operator@sha256:a6d24a006a6b92967ac90786b49bc1ac26e5477cf028cd1186efcfc2466484db quay.io/cilium/operator:v1.12.5],SizeBytes:26802430,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119 registry.k8s.io/sig-storage/csi-provisioner:v3.1.0],SizeBytes:23345856,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4 registry.k8s.io/sig-storage/csi-resizer:v1.4.0],SizeBytes:22381475,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b registry.k8s.io/sig-storage/csi-attacher:v3.4.0],SizeBytes:22085298,},ContainerImage{Names:[registry.k8s.io/provider-aws/cloud-controller-manager@sha256:fdeb61e3e42ecd9cca868d550ebdb88dd6341d9e91fcfa9a37e227dab2ad22cb registry.k8s.io/provider-aws/cloud-controller-manager:v1.26.0],SizeBytes:20154862,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/kops/kube-apiserver-healthcheck:1.27.0-alpha.1],SizeBytes:4967345,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:43:52.557: INFO: Logging kubelet events for node i-0ea715ad3f7d7c666 Jan 14 06:43:52.664: INFO: Logging pods the kubelet thinks is on node i-0ea715ad3f7d7c666 Jan 14 06:43:52.774: INFO: kube-controller-manager-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.774: INFO: Container kube-controller-manager ready: true, restart count 2 Jan 14 06:43:52.774: INFO: kube-scheduler-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.774: INFO: Container kube-scheduler ready: true, restart count 0 Jan 14 06:43:52.774: INFO: cilium-vl5tq started at 2023-01-14 06:24:57 +0000 UTC (1+1 container statuses recorded) Jan 14 06:43:52.774: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:43:52.774: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:43:52.774: INFO: ebs-csi-node-knngk started at 2023-01-14 06:24:57 +0000 UTC (0+3 container statuses recorded) Jan 14 06:43:52.774: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:52.774: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:52.774: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:43:52.774: INFO: dns-controller-69987775c6-66b5p started at 2023-01-14 06:24:57 +0000 UTC 
(0+1 container statuses recorded) Jan 14 06:43:52.774: INFO: Container dns-controller ready: true, restart count 0 Jan 14 06:43:52.774: INFO: ebs-csi-controller-5bd98b456f-zxg2l started at 2023-01-14 06:24:57 +0000 UTC (0+5 container statuses recorded) Jan 14 06:43:52.774: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:43:52.774: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:43:52.774: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 06:43:52.774: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:43:52.774: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:43:52.774: INFO: etcd-manager-main-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.774: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:43:52.774: INFO: kube-apiserver-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+2 container statuses recorded) Jan 14 06:43:52.774: INFO: Container healthcheck ready: true, restart count 0 Jan 14 06:43:52.774: INFO: Container kube-apiserver ready: true, restart count 1 Jan 14 06:43:52.774: INFO: aws-cloud-controller-manager-8g49k started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.774: INFO: Container aws-cloud-controller-manager ready: true, restart count 0 Jan 14 06:43:52.774: INFO: cilium-operator-5dd44dc49f-hdhf7 started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.774: INFO: Container cilium-operator ready: true, restart count 0 Jan 14 06:43:52.774: INFO: etcd-manager-events-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.774: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:43:52.775: INFO: kops-controller-8ntms started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:43:52.775: INFO: Container 
kops-controller ready: true, restart count 0 Jan 14 06:43:53.145: INFO: Latency metrics for node i-0ea715ad3f7d7c666 [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 �[1mSTEP:�[0m Destroying namespace "kubectl-4613" for this suite. �[38;5;243m01/14/23 06:43:53.145�[0m
Filter through log files | View test history on testgrid
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sNetworking\sGranular\sChecks\:\sPods\sshould\sfunction\sfor\snode\-pod\scommunication\:\shttp\s\[LinuxOnly\]\s\[NodeConformance\]\s\[Conformance\]$'
test/e2e/framework/network/utils.go:866 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000360c40, {0x75c6f5c, 0x9}, 0xc0031ac9f0) test/e2e/framework/network/utils.go:866 +0x1d0 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000360c40, 0x47?) test/e2e/framework/network/utils.go:763 +0x55 k8s.io/kubernetes/test/e2e/framework/network.NewCoreNetworkingTestConfig(0xc000a40000, 0x1) test/e2e/framework/network/utils.go:144 +0xfb k8s.io/kubernetes/test/e2e/common/network.glob..func1.1.4() test/e2e/common/network/networking.go:106 +0x34from junit_01.xml
[BeforeEach] [sig-network] Networking set up framework | framework.go:178 �[1mSTEP:�[0m Creating a kubernetes client �[38;5;243m01/14/23 06:33:55.28�[0m Jan 14 06:33:55.280: INFO: >>> kubeConfig: /root/.kube/config �[1mSTEP:�[0m Building a namespace api object, basename pod-network-test �[38;5;243m01/14/23 06:33:55.282�[0m �[1mSTEP:�[0m Waiting for a default service account to be provisioned in namespace �[38;5;243m01/14/23 06:33:55.593�[0m �[1mSTEP:�[0m Waiting for kube-root-ca.crt to be provisioned in namespace �[38;5;243m01/14/23 06:33:55.799�[0m [BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 [It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] test/e2e/common/network/networking.go:105 �[1mSTEP:�[0m Performing setup for networking test in namespace pod-network-test-6406 �[38;5;243m01/14/23 06:33:56.004�[0m �[1mSTEP:�[0m creating a selector �[38;5;243m01/14/23 06:33:56.004�[0m �[1mSTEP:�[0m Creating the service pods in kubernetes �[38;5;243m01/14/23 06:33:56.004�[0m Jan 14 06:33:56.004: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable Jan 14 06:33:56.638: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-6406" to be "running and ready" Jan 14 06:33:56.742: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 103.336008ms Jan 14 06:33:56.742: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:33:58.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 2.207991339s Jan 14 06:33:58.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:00.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4.207917525s Jan 14 06:34:00.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:02.847: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 6.20850581s Jan 14 06:34:02.847: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:04.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 8.207387958s Jan 14 06:34:04.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:06.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 10.207676432s Jan 14 06:34:06.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:08.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 12.207277198s Jan 14 06:34:08.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:10.845: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 14.206999976s Jan 14 06:34:10.845: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:12.845: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 16.20712696s Jan 14 06:34:12.845: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:14.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 18.207949786s Jan 14 06:34:14.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:16.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. 
Elapsed: 20.207501276s Jan 14 06:34:16.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:18.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 22.207986919s Jan 14 06:34:18.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:20.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 24.207700328s Jan 14 06:34:20.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:22.845: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 26.206904233s Jan 14 06:34:22.845: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:24.845: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 28.207174653s Jan 14 06:34:24.845: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:26.845: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 30.20712795s Jan 14 06:34:26.845: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:28.845: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 32.206914591s Jan 14 06:34:28.845: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:30.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 34.207207268s Jan 14 06:34:30.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:32.845: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. 
Elapsed: 36.206961856s Jan 14 06:34:32.845: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:34.845: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 38.206642553s Jan 14 06:34:34.845: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:36.848: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 40.209566126s Jan 14 06:34:36.848: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:38.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 42.208160117s Jan 14 06:34:38.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:40.847: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 44.20895675s Jan 14 06:34:40.847: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:42.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 46.207304474s Jan 14 06:34:42.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:44.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 48.20790251s Jan 14 06:34:44.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:46.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 50.207739567s Jan 14 06:34:46.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:48.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. 
Elapsed: 52.207910354s Jan 14 06:34:48.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:50.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 54.207760217s Jan 14 06:34:50.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:52.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 56.207716782s Jan 14 06:34:52.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:54.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 58.20775895s Jan 14 06:34:54.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:56.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.207451382s Jan 14 06:34:56.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:34:58.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.208015867s Jan 14 06:34:58.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:00.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.207234018s Jan 14 06:35:00.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:02.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.207304732s Jan 14 06:35:02.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:04.847: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m8.208330524s Jan 14 06:35:04.847: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:06.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.207308069s Jan 14 06:35:06.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:08.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.208164828s Jan 14 06:35:08.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:10.847: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.208244293s Jan 14 06:35:10.847: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:12.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.207326026s Jan 14 06:35:12.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:14.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.20772723s Jan 14 06:35:14.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:16.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.207779552s Jan 14 06:35:16.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:18.847: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.208266323s Jan 14 06:35:18.847: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:20.845: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m24.206975204s Jan 14 06:35:20.845: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:22.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.207458752s Jan 14 06:35:22.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:24.846: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.207299003s Jan 14 06:35:24.846: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:26.845: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m30.207062112s Jan 14 06:35:26.845: INFO: The phase of Pod netserver-0 is Running (Ready = false) Jan 14 06:35:28.846: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m32.207301501s Jan 14 06:35:28.846: INFO: The phase of Pod netserver-0 is Running (Ready = false) Jan 14 06:35:30.846: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m34.20754862s Jan 14 06:35:30.846: INFO: The phase of Pod netserver-0 is Running (Ready = false) Jan 14 06:35:32.845: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m36.207065872s Jan 14 06:35:32.845: INFO: The phase of Pod netserver-0 is Running (Ready = false) Jan 14 06:35:34.846: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m38.208136011s Jan 14 06:35:34.846: INFO: The phase of Pod netserver-0 is Running (Ready = false) Jan 14 06:35:36.846: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. 
Elapsed: 1m40.207759591s Jan 14 06:35:36.846: INFO: The phase of Pod netserver-0 is Running (Ready = true) Jan 14 06:35:36.846: INFO: Pod "netserver-0" satisfied condition "running and ready" Jan 14 06:35:36.950: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-6406" to be "running and ready" Jan 14 06:35:37.053: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 103.609926ms Jan 14 06:35:37.053: INFO: The phase of Pod netserver-1 is Running (Ready = true) Jan 14 06:35:37.053: INFO: Pod "netserver-1" satisfied condition "running and ready" Jan 14 06:35:37.157: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-6406" to be "running and ready" Jan 14 06:35:37.260: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 103.501643ms Jan 14 06:35:37.260: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:39.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.207525061s Jan 14 06:35:39.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:41.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4.207267346s Jan 14 06:35:41.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:43.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 6.208176536s Jan 14 06:35:43.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:45.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 8.208177093s Jan 14 06:35:45.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:47.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 10.20754175s Jan 14 06:35:47.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:49.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 12.2072925s Jan 14 06:35:49.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:51.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 14.207440813s Jan 14 06:35:51.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:53.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 16.207214046s Jan 14 06:35:53.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:55.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 18.207154749s Jan 14 06:35:55.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:57.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 20.207755497s Jan 14 06:35:57.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:35:59.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 22.207282335s Jan 14 06:35:59.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:01.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 24.208105443s Jan 14 06:36:01.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:03.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 26.207218446s Jan 14 06:36:03.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:05.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 28.207568761s Jan 14 06:36:05.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:07.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 30.208119975s Jan 14 06:36:07.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:09.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 32.208214418s Jan 14 06:36:09.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:11.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 34.207346887s Jan 14 06:36:11.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:13.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 36.207341794s Jan 14 06:36:13.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:15.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 38.207948663s Jan 14 06:36:15.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:17.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 40.207495852s Jan 14 06:36:17.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:19.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 42.208480152s Jan 14 06:36:19.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:21.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 44.208125492s Jan 14 06:36:21.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:23.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 46.208252989s Jan 14 06:36:23.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:25.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 48.207074956s Jan 14 06:36:25.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:27.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 50.207346315s Jan 14 06:36:27.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:29.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 52.20732142s Jan 14 06:36:29.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:31.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 54.207376358s Jan 14 06:36:31.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:33.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 56.207994532s Jan 14 06:36:33.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:35.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 58.20711849s Jan 14 06:36:35.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:37.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.207444206s Jan 14 06:36:37.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:39.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.207273157s Jan 14 06:36:39.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:41.369: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.212184209s Jan 14 06:36:41.369: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:43.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.207133289s Jan 14 06:36:43.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:45.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.207076554s Jan 14 06:36:45.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:47.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.207422593s Jan 14 06:36:47.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:49.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.207235914s Jan 14 06:36:49.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:51.367: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m14.210353214s Jan 14 06:36:51.367: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:53.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.207137583s Jan 14 06:36:53.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:55.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.206984257s Jan 14 06:36:55.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:57.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.206992226s Jan 14 06:36:57.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:36:59.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.207378812s Jan 14 06:36:59.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:01.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.207283885s Jan 14 06:37:01.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:03.369: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.21233328s Jan 14 06:37:03.369: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:05.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.207155354s Jan 14 06:37:05.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:07.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m30.207189257s Jan 14 06:37:07.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:09.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.20718893s Jan 14 06:37:09.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:11.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.208506632s Jan 14 06:37:11.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:13.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.207337904s Jan 14 06:37:13.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:15.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.207415949s Jan 14 06:37:15.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:17.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.207865805s Jan 14 06:37:17.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:19.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.207244603s Jan 14 06:37:19.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:21.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.207058366s Jan 14 06:37:21.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:23.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m46.207013945s Jan 14 06:37:23.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:25.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.207135701s Jan 14 06:37:25.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:27.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.207128392s Jan 14 06:37:27.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:29.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m52.207852628s Jan 14 06:37:29.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:31.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.207717831s Jan 14 06:37:31.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:33.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.206906339s Jan 14 06:37:33.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:35.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.207830468s Jan 14 06:37:35.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:37.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.207069375s Jan 14 06:37:37.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:39.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m2.206937235s Jan 14 06:37:39.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:41.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m4.207585225s Jan 14 06:37:41.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:43.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m6.207914832s Jan 14 06:37:43.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:45.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m8.206920343s Jan 14 06:37:45.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:47.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m10.208456184s Jan 14 06:37:47.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:49.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m12.207816949s Jan 14 06:37:49.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:51.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m14.206923166s Jan 14 06:37:51.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:53.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m16.20716649s Jan 14 06:37:53.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:55.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m18.206866089s Jan 14 06:37:55.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:57.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m20.206898377s Jan 14 06:37:57.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:37:59.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m22.208013451s Jan 14 06:37:59.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:01.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m24.2078854s Jan 14 06:38:01.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:03.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m26.207943018s Jan 14 06:38:03.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:05.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m28.207331083s Jan 14 06:38:05.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:07.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m30.208096393s Jan 14 06:38:07.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:09.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m32.207222264s Jan 14 06:38:09.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:11.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m34.207180689s Jan 14 06:38:11.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:13.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m36.207360347s Jan 14 06:38:13.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:15.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m38.207440403s Jan 14 06:38:15.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:17.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m40.208191823s Jan 14 06:38:17.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:19.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m42.207625521s Jan 14 06:38:19.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:21.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m44.207266168s Jan 14 06:38:21.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:23.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m46.207290768s Jan 14 06:38:23.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:25.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m48.206969322s Jan 14 06:38:25.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:27.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m50.207108198s Jan 14 06:38:27.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:29.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m52.206986082s Jan 14 06:38:29.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:31.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m54.207263969s Jan 14 06:38:31.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:33.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m56.207223581s Jan 14 06:38:33.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:35.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 2m58.207173392s Jan 14 06:38:35.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:37.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m0.207424315s Jan 14 06:38:37.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:39.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m2.207183904s Jan 14 06:38:39.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:41.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m4.20722593s Jan 14 06:38:41.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:43.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m6.20763682s Jan 14 06:38:43.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:45.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m8.208048778s Jan 14 06:38:45.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:47.366: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m10.209110979s Jan 14 06:38:47.366: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:49.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m12.207835381s Jan 14 06:38:49.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:51.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m14.20823513s Jan 14 06:38:51.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:53.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m16.207521508s Jan 14 06:38:53.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:55.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m18.207157473s Jan 14 06:38:55.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:57.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m20.207882649s Jan 14 06:38:57.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:38:59.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m22.20761281s Jan 14 06:38:59.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:01.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m24.207256597s Jan 14 06:39:01.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:03.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m26.207586123s Jan 14 06:39:03.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:05.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m28.207023452s Jan 14 06:39:05.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:07.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m30.207000514s Jan 14 06:39:07.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:09.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m32.208363084s Jan 14 06:39:09.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:11.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m34.20773303s Jan 14 06:39:11.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:13.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m36.207710717s Jan 14 06:39:13.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:15.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m38.207992445s Jan 14 06:39:15.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:17.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m40.207687273s Jan 14 06:39:17.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:19.495: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m42.337809411s Jan 14 06:39:19.495: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:21.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m44.20694803s Jan 14 06:39:21.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:23.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m46.207205649s Jan 14 06:39:23.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:25.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m48.207073325s Jan 14 06:39:25.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:27.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m50.20688128s Jan 14 06:39:27.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:29.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m52.207852604s Jan 14 06:39:29.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:31.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m54.207487442s Jan 14 06:39:31.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:33.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m56.206935961s Jan 14 06:39:33.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:35.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 3m58.207866894s Jan 14 06:39:35.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:37.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m0.207117159s Jan 14 06:39:37.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:39.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m2.207162798s Jan 14 06:39:39.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:41.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m4.206966475s Jan 14 06:39:41.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:43.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m6.206935771s Jan 14 06:39:43.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:45.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m8.207037988s Jan 14 06:39:45.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:47.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m10.207595301s Jan 14 06:39:47.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:49.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m12.206970997s Jan 14 06:39:49.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:51.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m14.207743102s Jan 14 06:39:51.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:53.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m16.207793061s Jan 14 06:39:53.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:55.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m18.207638433s Jan 14 06:39:55.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:57.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m20.207579401s Jan 14 06:39:57.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:39:59.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m22.206736441s Jan 14 06:39:59.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:01.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m24.207585696s Jan 14 06:40:01.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:03.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m26.208378972s Jan 14 06:40:03.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:05.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m28.207328598s Jan 14 06:40:05.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:07.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m30.207130367s Jan 14 06:40:07.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:09.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m32.207214457s Jan 14 06:40:09.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:11.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m34.207202196s Jan 14 06:40:11.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:13.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m36.207346074s Jan 14 06:40:13.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:15.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m38.207489578s Jan 14 06:40:15.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:17.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m40.208532669s Jan 14 06:40:17.366: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:19.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m42.20724285s Jan 14 06:40:19.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:21.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m44.207294949s Jan 14 06:40:21.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:23.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m46.207456676s Jan 14 06:40:23.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:25.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m48.207139916s Jan 14 06:40:25.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:27.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m50.207160294s Jan 14 06:40:27.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:29.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m52.20708668s Jan 14 06:40:29.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:31.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m54.207520823s Jan 14 06:40:31.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:33.365: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 4m56.207631563s Jan 14 06:40:33.365: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:35.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m58.207053379s Jan 14 06:40:35.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:37.364: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.207191396s Jan 14 06:40:37.364: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:37.468: INFO: Pod "netserver-2": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.310846212s Jan 14 06:40:37.468: INFO: The phase of Pod netserver-2 is Pending, waiting for it to be Running (with Ready = true) Jan 14 06:40:37.469: INFO: Unexpected error: <*pod.timeoutError | 0xc00309eb40>: { msg: "timed out while waiting for pod pod-network-test-6406/netserver-2 to be running and ready", observedObjects: [ <*v1.Pod | 0xc001476480>{ TypeMeta: {Kind: "", APIVersion: ""}, ObjectMeta: { Name: "netserver-2", GenerateName: "", Namespace: "pod-network-test-6406", SelfLink: "", UID: "8f1f22b0-b6f9-45ad-bcf2-5c8bb325e1ca", ResourceVersion: "13379", Generation: 0, CreationTimestamp: { Time: { wall: 0, ext: 63809274836, loc: { name: "Local", zone: [ {name: "UTC", offset: 0, isDST: false}, ], tx: [ { when: -576460752303423488, index: 0, isstd: false, isutc: false, }, ], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: "UTC", offset: 0, isDST: false}, }, }, }, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: { "selector-6e8a1a2a-c6d0-45aa-8197-2e3fd0a43101": "true", }, Annotations: nil, OwnerReferences: nil, Finalizers: nil, ManagedFields: [ { Manager: "e2e.test", Operation: "Update", APIVersion: "v1", Time: { Time: { wall: 0, ext: 63809274836, loc: { name: "Local", zone: [...], tx: [...], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: ..., offset: ..., isDST: ...}, }, }, }, FieldsType: "FieldsV1", FieldsV1: { Raw: 
"{\"f:metadata\":{\"f:labels\":{\".\":{},\"f:selector-6e8a1a2a-c6d0-45aa-8197-2e3fd0a43101\":{}}},\"f:spec\":{\"f:containers\":{\"k:{\\\"name\\\":\\\"webserver\\\"}\":{\".\":{},\"f:args\":{},\"f:image\":{},\"f:imagePullPolicy\":{},\"f:livenessProbe\":{\".\":{},\"f:failureThreshold\":{},\"f:httpGet\":{\".\":{},\"f:path\":{},\"f:port\":{},\"f:scheme\":{}},\"f:initialDelaySeconds\":{},\"f:periodSeconds\":{},\"f:successThreshold\":{},\"f:timeoutSeconds\":{}},\"f:name\":{},\"f:ports\":{\".\":{},\"k:{\\\"containerPort\\\":8081,\\\"protocol\\\":\\\"UDP\\\"}\":{\".\":{},\"f:containerPort\":{},\"f:name\":{},\"f:protocol\":{}},\"k:{\\\"containerPort\\\":8083,\\\"protocol\\\":\\\"TCP\\\"}\":{\".\":{},\"f:containerPort\":{},\"f:name\":{},\"f:protocol\":{}}},\"f:readinessProbe\":{\".\":{},\"f:failureThreshold\":{},\"f:httpGet\":{\".\":{},\"f:path\":{},\"f:port\":{},\"f:scheme\":{}},\"f:initialDelaySeconds\":{},\"f:periodSeconds\":{},\"f:successThreshold\":{},\"f:timeoutSeconds\":{}},\"f:resources\":{},\"f:terminationMessagePath\":{},\"f:terminationMessagePolicy\":{}}},\"f:dnsPolicy\":{},\"f:enableServiceLinks\":{},\"f:nodeSelector\":{},\"f:restartPolicy\":{},\"f:schedulerName\":{},\"f:securityContext\":{},\"f:terminationGracePeriodSeconds\":{}}}", }, Subresource: "", }, { Manager: "kubelet", Operation: "Update", APIVersion: "v1", Time: { Time: { wall: 0, ext: 63809274836, loc: { name: "Local", zone: [...], tx: [...], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: ..., offset: ..., isDST: ...}, }, }, }, FieldsType: "FieldsV1", FieldsV1: { Raw: 
"{\"f:status\":{\"f:conditions\":{\"k:{\\\"type\\\":\\\"ContainersReady\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:message\":{},\"f:reason\":{},\"f:status\":{},\"f:type\":{}},\"k:{\\\"type\\\":\\\"Initialized\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:status\":{},\"f:type\":{}},\"k:{\\\"type\\\":\\\"Ready\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:message\":{},\"f:reason\":{},\"f:status\":{},\"f:type\":{}}},\"f:containerStatuses\":{},\"f:hostIP\":{},\"f:startTime\":{}}}", }, Subresource: "status", }, ], }, Spec: { Volumes: [ { Name: "kube-api-access-v6fnp", VolumeSource: { HostPath: nil, EmptyDir: nil, GCEPersistentDisk: nil, AWSElasticBlockStore: nil, GitRepo: nil, Secret: nil, NFS: nil, ISCSI: nil, Glusterfs: nil, PersistentVolumeClaim: nil, RBD: nil, FlexVolume: nil, Cinder: nil, CephFS: nil, Flocker: nil, DownwardAPI: nil, FC: nil, AzureFile: nil, ConfigMap: nil, VsphereVolume: nil, Quobyte: nil, AzureDisk: nil, PhotonPersistentDisk: nil, Projected: { Sources: [ { Secret: ..., DownwardAPI: ..., ConfigMap: ..., ServiceAccountToken: ..., }, ... Gomega truncated this representation as it exceeds 'format.MaxLength'. Consider having the object provide a custom 'GomegaStringer' representation or adjust the parameters in Gomega's 'format' package. Learn more here: https://onsi.github.io/gomega/#adjusting-output Jan 14 06:40:37.469: FAIL: timed out while waiting for pod pod-network-test-6406/netserver-2 to be running and ready Full Stack Trace k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000360c40, {0x75c6f5c, 0x9}, 0xc0031ac9f0) test/e2e/framework/network/utils.go:866 +0x1d0 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000360c40, 0x47?) 
test/e2e/framework/network/utils.go:763 +0x55 k8s.io/kubernetes/test/e2e/framework/network.NewCoreNetworkingTestConfig(0xc000a40000, 0x1) test/e2e/framework/network/utils.go:144 +0xfb k8s.io/kubernetes/test/e2e/common/network.glob..func1.1.4() test/e2e/common/network/networking.go:106 +0x34 [AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 Jan 14 06:40:37.470: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] Networking test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 �[1mSTEP:�[0m dump namespace information after failure �[38;5;243m01/14/23 06:40:37.574�[0m �[1mSTEP:�[0m Collecting events from namespace "pod-network-test-6406". �[38;5;243m01/14/23 06:40:37.574�[0m �[1mSTEP:�[0m Found 30 events. �[38;5;243m01/14/23 06:40:37.678�[0m Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:56 +0000 UTC - event for netserver-0: {default-scheduler } Scheduled: Successfully assigned pod-network-test-6406/netserver-0 to i-0526f6963633e8375 Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:56 +0000 UTC - event for netserver-0: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "0cda18af316afdfa514f6a2f236bc4f29d657dc798fc76a871d7e188106b1207": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:56 +0000 UTC - event for netserver-1: {default-scheduler } Scheduled: Successfully assigned pod-network-test-6406/netserver-1 to i-06bd219a44e00580c Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:56 +0000 UTC - event for netserver-2: {default-scheduler } Scheduled: Successfully assigned pod-network-test-6406/netserver-2 to i-0930a50194a147b36 Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:56 +0000 UTC 
- event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4f9511ccdb6118a84e8df4218e81dc741715024373f58eb1dcb10319c4179fad": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:56 +0000 UTC - event for netserver-3: {default-scheduler } Scheduled: Successfully assigned pod-network-test-6406/netserver-3 to i-095cd924e787c9946 Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:57 +0000 UTC - event for netserver-3: {kubelet i-095cd924e787c9946} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:57 +0000 UTC - event for netserver-3: {kubelet i-095cd924e787c9946} Started: Started container webserver Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:57 +0000 UTC - event for netserver-3: {kubelet i-095cd924e787c9946} Created: Created container webserver Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:58 +0000 UTC - event for netserver-1: {kubelet i-06bd219a44e00580c} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:58 +0000 UTC - event for netserver-1: {kubelet i-06bd219a44e00580c} Created: Created container webserver Jan 14 06:40:37.678: INFO: At 2023-01-14 06:33:58 +0000 UTC - event for netserver-1: {kubelet i-06bd219a44e00580c} Started: Started container webserver Jan 14 06:40:37.678: INFO: At 2023-01-14 06:34:08 +0000 UTC - event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "b3769eb491f0001df622090fe944c070c6acd5e3c32b37189f9e64219f9f8195": plugin type="cilium-cni" name="cilium" failed (add): unable to 
allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:34:10 +0000 UTC - event for netserver-0: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "d35402c59c3634649e9d2ae04e6e215ed948659a59b417968f947f35ec007a92": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:34:21 +0000 UTC - event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4b95fc1aa25c81a60e63283270bde876a41a4b98012c963bf66012de82897740": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:34:22 +0000 UTC - event for netserver-0: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4232dd9e6fcf423fcd3e5646afe0e025f96b64021a798821293903286a7fd3f3": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:34:32 +0000 UTC - event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "eda8903caab64117388c4c9458ec584bd8053752a2ed6536d2dd2f2d71a02f53": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:34:35 +0000 UTC - 
event for netserver-0: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "45b97663ef7296d161d160ac15aabaaafb66c2ff2a62808b3b2b0aaacdc75e90": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:34:43 +0000 UTC - event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "518d8b2ff09399839efaf994acc85532597f640db4692f34bb2ae4a097f69821": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:34:47 +0000 UTC - event for netserver-0: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "01e5443b8cc6b2b57b4e34cbb7b7dc711a6a3923214210419834d3abe5e8ecfc": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:34:57 +0000 UTC - event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "88fa8c297bb7488791c4b7d62d45504b82648577e17c9bdd20bdf76de51f090f": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.678: INFO: At 2023-01-14 06:35:00 +0000 UTC - event for netserver-0: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup 
network for sandbox "e8a76e0bac42e4bb69d5e67ba2ed6875bedbc706ffc891425a5dcc19690d7e90": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.679: INFO: At 2023-01-14 06:35:11 +0000 UTC - event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c7eda3df7b042d45e06bc24e84898ca0a53ed459d8c128300d6a4c4985973da9": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.679: INFO: At 2023-01-14 06:35:12 +0000 UTC - event for netserver-0: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "3c47f1a57737d8445778339417e278a52836deef31b9bbf28e12beb3896a1047": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.679: INFO: At 2023-01-14 06:35:23 +0000 UTC - event for netserver-0: {kubelet i-0526f6963633e8375} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Jan 14 06:40:37.679: INFO: At 2023-01-14 06:35:24 +0000 UTC - event for netserver-0: {kubelet i-0526f6963633e8375} Started: Started container webserver Jan 14 06:40:37.679: INFO: At 2023-01-14 06:35:24 +0000 UTC - event for netserver-0: {kubelet i-0526f6963633e8375} Created: Created container webserver Jan 14 06:40:37.679: INFO: At 2023-01-14 06:35:26 +0000 UTC - event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5cea1dfd46c28da1dfb112bef4fcead6c15238d16120e89a5a1f629550b1518c": plugin 
type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.679: INFO: At 2023-01-14 06:35:41 +0000 UTC - event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "ccfb644f27cd7bdf1885870ebd164fc42df4ee256b527c61682e94023904aacc": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.679: INFO: At 2023-01-14 06:35:55 +0000 UTC - event for netserver-2: {kubelet i-0930a50194a147b36} FailedCreatePodSandBox: (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "d10e525cd5aa6a330440149c2f7d15a41fce5b13904ab210f8e43677d04d0c33": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:40:37.782: INFO: POD NODE PHASE GRACE CONDITIONS Jan 14 06:40:37.782: INFO: netserver-0 i-0526f6963633e8375 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:35:36 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:35:36 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC }] Jan 14 06:40:37.782: INFO: netserver-1 i-06bd219a44e00580c Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:17 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:17 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC }] Jan 14 06:40:37.782: INFO: netserver-2 i-0930a50194a147b36 
Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC }] Jan 14 06:40:37.782: INFO: netserver-3 i-095cd924e787c9946 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:16 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:16 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:33:56 +0000 UTC }] Jan 14 06:40:37.782: INFO: Jan 14 06:40:38.102: INFO: Unable to fetch pod-network-test-6406/netserver-2/webserver logs: the server rejected our request for an unknown reason (get pods netserver-2) Jan 14 06:40:38.312: INFO: Logging node info for node i-0526f6963633e8375 Jan 14 06:40:38.416: INFO: Node Info: &Node{ObjectMeta:{i-0526f6963633e8375 b8bbb07c-e234-4117-968a-d4f54d957b46 15709 0 2023-01-14 06:26:11 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a io.kubernetes.storage.mock/node:some-mock-node kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0526f6963633e8375 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0526f6963633e8375 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.59.50 
csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-4581":"i-0526f6963633e8375","ebs.csi.aws.com":"i-0526f6963633e8375"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.4.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:39:21 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:39:25 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:io.kubernetes.storage.mock/node":{},"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.4.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0526f6963633e8375,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.4.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:39:25 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:39:25 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:39:25 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 
UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:39:25 +0000 UTC,LastTransitionTime:2023-01-14 06:26:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.59.50,},NodeAddress{Type:ExternalIP,Address:13.38.88.176,},NodeAddress{Type:InternalDNS,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-88-176.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec24f53d700a2e9399be7e5e2cc1e943,SystemUUID:ec24f53d-700a-2e93-99be-7e5e2cc1e943,BootID:58d231c1-9ab3-4e54-9948-319cfad92d73,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e 
registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 
registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[gcr.io/authenticated-image-pulling/alpine@sha256:7ff177862cb50c602bfe81f805969412e619c054a2bbead977d0c276988aa4a0 gcr.io/authenticated-image-pulling/alpine:3.7],SizeBytes:2110879,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/csi-mock-csi-mock-volumes-4581^2cadbc7d-93d6-11ed-858d-76d972479176],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-mock-csi-mock-volumes-4581^2cadbc7d-93d6-11ed-858d-76d972479176,DevicePath:,},},Config:nil,},} Jan 14 06:40:38.416: INFO: Logging kubelet events for node i-0526f6963633e8375 Jan 14 06:40:38.522: INFO: Logging pods the kubelet thinks is on node i-0526f6963633e8375 Jan 14 06:40:38.737: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6xkx2 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:38.737: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-kvr9w started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: false, restart count 0 Jan 14 06:40:38.737: 
INFO: pod-subpath-test-preprovisionedpv-2gt8 started at 2023-01-14 06:37:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container test-container-subpath-preprovisionedpv-2gt8 ready: false, restart count 0 Jan 14 06:40:38.737: INFO: csi-mockplugin-0 started at 2023-01-14 06:33:36 +0000 UTC (0+3 container statuses recorded) Jan 14 06:40:38.737: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:40:38.737: INFO: Container driver-registrar ready: true, restart count 0 Jan 14 06:40:38.737: INFO: Container mock ready: true, restart count 0 Jan 14 06:40:38.737: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-crc8k started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:38.737: INFO: netserver-0 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container webserver ready: true, restart count 0 Jan 14 06:40:38.737: INFO: pvc-volume-tester-lrm5k started at 2023-01-14 06:39:20 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container volume-tester ready: false, restart count 0 Jan 14 06:40:38.737: INFO: pod-subpath-test-secret-7ssj started at 2023-01-14 06:39:23 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container test-container-subpath-secret-7ssj ready: false, restart count 0 Jan 14 06:40:38.737: INFO: suspend-false-to-true-cbsf6 started at 2023-01-14 06:39:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container c ready: false, restart count 0 Jan 14 06:40:38.737: INFO: security-context-9dbfb503-c405-47ac-9294-6ea974357a5f started at 2023-01-14 06:38:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container test-container ready: false, restart count 0 Jan 14 06:40:38.737: INFO: 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-66vns started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:38.737: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:33:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:40:38.737: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8z92 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:38.737: INFO: sample-webhook-deployment-865554f4d9-r87jf started at 2023-01-14 06:40:32 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container sample-webhook ready: false, restart count 0 Jan 14 06:40:38.737: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:40:38.737: INFO: ebs-csi-node-r8qfk started at 2023-01-14 06:26:12 +0000 UTC (0+3 container statuses recorded) Jan 14 06:40:38.737: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:40:38.737: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:40:38.737: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:40:38.737: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-mh56b started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:38.737: INFO: update-demo-nautilus-mrn8r started at 2023-01-14 06:38:46 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container update-demo ready: false, restart count 0 Jan 
14 06:40:38.737: INFO: cilium-tv25q started at 2023-01-14 06:26:12 +0000 UTC (1+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:40:38.737: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:40:38.737: INFO: pod-projected-configmaps-bb73475c-05a7-4e4e-8b32-9886f2febf29 started at 2023-01-14 06:39:21 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container agnhost-container ready: false, restart count 0 Jan 14 06:40:38.737: INFO: csi-mockplugin-0 started at 2023-01-14 06:35:25 +0000 UTC (0+3 container statuses recorded) Jan 14 06:40:38.737: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:40:38.737: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:40:38.737: INFO: Container mock ready: false, restart count 0 Jan 14 06:40:38.737: INFO: bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb started at 2023-01-14 06:38:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb ready: false, restart count 0 Jan 14 06:40:38.737: INFO: hostexec-i-0526f6963633e8375-7mnst started at 2023-01-14 06:32:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:40:38.737: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-v4q7f started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:38.737: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-l2cw9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:38.737: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-p4cww started at 
2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:38.737: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ts46q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:38.737: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.163: INFO: Latency metrics for node i-0526f6963633e8375 Jan 14 06:40:39.163: INFO: Logging node info for node i-06bd219a44e00580c Jan 14 06:40:39.267: INFO: Node Info: &Node{ObjectMeta:{i-06bd219a44e00580c c2a57daf-87e6-4c31-ab8d-158cf1752c85 16062 0 2023-01-14 06:26:09 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-06bd219a44e00580c kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-06bd219a44e00580c topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.61.252 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-06bd219a44e00580c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC 
FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.3.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:35:28 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:40:38 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} 
status}]},Spec:NodeSpec{PodCIDR:100.96.3.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-06bd219a44e00580c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.3.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:40:38 +0000 UTC,LastTransitionTime:2023-01-14 06:26:21 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.61.252,},NodeAddress{Type:ExternalIP,Address:15.237.110.205,},NodeAddress{Type:InternalDNS,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-110-205.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec28b615c9f51208890a610e546cafd1,SystemUUID:ec28b615-c9f5-1208-890a-610e546cafd1,BootID:9cfe2407-336f-468c-b599-1b87cbc71140,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e 
registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 
registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847 kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4 kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4,DevicePath:,},},Config:nil,},} Jan 14 06:40:39.267: INFO: Logging kubelet events for node i-06bd219a44e00580c Jan 14 06:40:39.372: INFO: Logging pods the kubelet thinks is on node i-06bd219a44e00580c Jan 14 06:40:39.485: INFO: pod-disruption-failure-ignore-1-f5m4k started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container c ready: false, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rlwrq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ztghd started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, 
restart count 0 Jan 14 06:40:39.485: INFO: update-demo-nautilus-wzc78 started at 2023-01-14 06:38:46 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container update-demo ready: false, restart count 0 Jan 14 06:40:39.485: INFO: pod-disruption-failure-ignore-1-xjff6 started at 2023-01-14 06:34:27 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container c ready: false, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6nwrq started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: pod-disruption-failure-ignore-1-6gwgt started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container c ready: false, restart count 0 Jan 14 06:40:39.485: INFO: pod-db681d65-3e7a-406a-aa51-b57c4ce4869e started at 2023-01-14 06:39:09 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container test-container ready: false, restart count 0 Jan 14 06:40:39.485: INFO: pod-disruption-failure-ignore-1-ghlhh started at 2023-01-14 06:34:12 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container c ready: false, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zbhxz started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:13 +0000 UTC (0+7 container statuses recorded) Jan 14 06:40:39.485: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:40:39.485: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:40:39.485: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:40:39.485: 
INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:40:39.485: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:40:39.485: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:40:39.485: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:40:39.485: INFO: cilium-k6c6s started at 2023-01-14 06:26:10 +0000 UTC (1+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:40:39.485: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:40:39.485: INFO: downwardapi-volume-75c6efde-4aed-47bb-8563-2d685d442490 started at 2023-01-14 06:39:23 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container client-container ready: false, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q2x2n started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: ebs-csi-node-62qzb started at 2023-01-14 06:26:10 +0000 UTC (0+3 container statuses recorded) Jan 14 06:40:39.485: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:40:39.485: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:40:39.485: INFO: netserver-1 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container webserver ready: true, restart count 0 Jan 14 06:40:39.485: INFO: liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 started at 2023-01-14 06:39:09 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container agnhost-container ready: false, restart count 0 Jan 14 06:40:39.485: INFO: pod-disruption-failure-ignore-1-crpgt started at 2023-01-14 06:34:01 +0000 UTC 
(0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container c ready: false, restart count 0 Jan 14 06:40:39.485: INFO: pod-edd099ae-7d89-4db8-9a86-b238ae68aba9 started at 2023-01-14 06:39:15 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container write-pod ready: false, restart count 0 Jan 14 06:40:39.485: INFO: pod-disruption-failure-ignore-1-5xwhv started at 2023-01-14 06:34:05 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container c ready: false, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-cf86c started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: inline-volume-tester-2fmpv started at 2023-01-14 06:34:58 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k776q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-gsthr started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: inline-volume-tester-v5nnb started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:40:39.485: INFO: pod-disruption-failure-ignore-1-frbqx started at 2023-01-14 06:34:08 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container c ready: false, restart count 0 Jan 14 06:40:39.485: INFO: 
test-ss-0 started at 2023-01-14 06:34:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container webserver ready: false, restart count 0 Jan 14 06:40:39.485: INFO: dns-test-1b49684b-e070-4d75-b586-93f0f22c501e started at 2023-01-14 06:39:25 +0000 UTC (0+3 container statuses recorded) Jan 14 06:40:39.485: INFO: Container jessie-querier ready: false, restart count 0 Jan 14 06:40:39.485: INFO: Container querier ready: false, restart count 0 Jan 14 06:40:39.485: INFO: Container webserver ready: false, restart count 0 Jan 14 06:40:39.485: INFO: suspend-false-to-true-w87tb started at 2023-01-14 06:39:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container c ready: false, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-jzs4v started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: hostexec-i-06bd219a44e00580c-vkgh8 started at 2023-01-14 06:39:10 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:40:39.485: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dbmt8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:39.485: INFO: pod-disruption-failure-ignore-0-dbk78 started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:39.485: INFO: Container c ready: true, restart count 0 Jan 14 06:40:39.906: INFO: Latency metrics for node i-06bd219a44e00580c Jan 14 06:40:39.906: INFO: Logging node info for node i-0930a50194a147b36 Jan 14 06:40:40.009: INFO: Node Info: &Node{ObjectMeta:{i-0930a50194a147b36 4316b3c5-1eeb-4ee2-9818-40f99d51117d 14934 0 2023-01-14 06:26:06 
+0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0930a50194a147b36 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0930a50194a147b36 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.36.60 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0930a50194a147b36"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.1.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:37:39 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.1.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0930a50194a147b36,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.1.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 
UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:19 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.36.60,},NodeAddress{Type:ExternalIP,Address:15.237.49.122,},NodeAddress{Type:InternalDNS,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-49-122.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2ac5dccb44f409fdc575df19a0b9a7,SystemUUID:ec2ac5dc-cb44-f409-fdc5-75df19a0b9a7,BootID:9dff06f2-e51d-4b5e-a657-e8f546eded95,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e 
registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:68d396900aeaa072c1f27289485fdac29834045a6f3ffe369bf389d830ef572d 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.6],SizeBytes:20293261,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:40:40.010: INFO: Logging kubelet events for node i-0930a50194a147b36 Jan 14 
06:40:40.115: INFO: Logging pods the kubelet thinks is on node i-0930a50194a147b36 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g4xd9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.225: INFO: ebs-csi-node-rpzft started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:40:40.225: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:40:40.225: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:40:40.225: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-pkvpm started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.225: INFO: coredns-559769c974-5xkn6 started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container coredns ready: true, restart count 0 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g6wnp started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zjjvx started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-slt2z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 
06:40:40.225: INFO: coredns-autoscaler-7cb5c5b969-svc7j started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container autoscaler ready: true, restart count 0 Jan 14 06:40:40.225: INFO: netserver-2 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container webserver ready: false, restart count 0 Jan 14 06:40:40.225: INFO: csi-mockplugin-0 started at 2023-01-14 06:37:42 +0000 UTC (0+3 container statuses recorded) Jan 14 06:40:40.225: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:40:40.225: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:40:40.225: INFO: Container mock ready: false, restart count 0 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rfg9z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-xxqp8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q9h4x started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dw26q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.225: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:37:43 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container 
csi-attacher ready: false, restart count 0 Jan 14 06:40:40.225: INFO: cilium-75rxm started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:40:40.225: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:40:40.225: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-bb7qt started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.225: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.602: INFO: Latency metrics for node i-0930a50194a147b36 Jan 14 06:40:40.602: INFO: Logging node info for node i-095cd924e787c9946 Jan 14 06:40:40.705: INFO: Node Info: &Node{ObjectMeta:{i-095cd924e787c9946 7ac98e5e-c131-42e0-a67e-ba9b45d163a4 15074 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-095cd924e787c9946 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-095cd924e787c9946 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.51.27 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-095cd924e787c9946"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.2.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:38:00 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} 
status}]},Spec:NodeSpec{PodCIDR:100.96.2.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-095cd924e787c9946,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.2.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:20 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.51.27,},NodeAddress{Type:ExternalIP,Address:13.38.27.88,},NodeAddress{Type:InternalDNS,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-27-88.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2449c051467854b20245f8e87294d1,SystemUUID:ec2449c0-5146-7854-b202-45f8e87294d1,BootID:11ed24c0-6b48-4372-960a-a4095c73f4ca,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/sample-apiserver@sha256:8d70890151aa5d096f331cb9da1b9cd5be0412b7363fe67b5c3befdcaa2a28d0 registry.k8s.io/e2e-test-images/sample-apiserver:1.17.7],SizeBytes:25667066,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:40:40.706: INFO: Logging kubelet events for node i-095cd924e787c9946 Jan 14 06:40:40.811: INFO: Logging pods the kubelet thinks is on node i-095cd924e787c9946 Jan 14 06:40:40.921: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:34 +0000 UTC (0+7 container statuses recorded) Jan 14 06:40:40.921: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:40:40.921: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:40:40.921: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:40:40.921: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:40:40.921: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:40:40.921: 
INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:40:40.921: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:40:40.921: INFO: coredns-559769c974-lpb2c started at 2023-01-14 06:26:42 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container coredns ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k5vzv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-79v7d started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: ebs-csi-node-q6j9r started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:40:40.921: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:40:40.921: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-hghxq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-qnwnv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cilium-kpqdf started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 
14 06:40:40.921: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:40:40.921: INFO: netserver-3 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container webserver ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-7bdwh started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-94lp8 started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: test-pod started at 2023-01-14 06:38:30 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container webserver ready: false, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8pkw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-d9fvw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-8x9bt started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:40.921: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ppzfj started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:40.921: INFO: Container 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:40:41.306: INFO: Latency metrics for node i-095cd924e787c9946 Jan 14 06:40:41.306: INFO: Logging node info for node i-0ea715ad3f7d7c666 Jan 14 06:40:41.410: INFO: Node Info: &Node{ObjectMeta:{i-0ea715ad3f7d7c666 1b9ffdb4-6e31-4298-bf35-45383b8cddd4 14687 0 2023-01-14 06:24:19 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:c5.large beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kops.k8s.io/kops-controller-pki: kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0ea715ad3f7d7c666 kubernetes.io/os:linux node-role.kubernetes.io/control-plane: node.kubernetes.io/exclude-from-external-load-balancers: node.kubernetes.io/instance-type:c5.large topology.ebs.csi.aws.com/zone:eu-west-3a topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.43.108 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0ea715ad3f7d7c666"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2023-01-14 06:24:19 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {protokube Update v1 2023-01-14 06:24:47 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kops.k8s.io/kops-controller-pki":{},"f:node-role.kubernetes.io/control-plane":{},"f:node.kubernetes.io/exclude-from-external-load-balancers":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:25:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.0.0/24\"":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:taints":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kubelet Update v1 2023-01-14 06:36:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.0.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0ea715ad3f7d7c666,Unschedulable:false,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/control-plane,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[100.96.0.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3892301824 0} {<nil>} 3801076Ki BinarySI},pods: {{110 
0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3787444224 0} {<nil>} 3698676Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:25:06 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.43.108,},NodeAddress{Type:ExternalIP,Address:13.37.224.194,},NodeAddress{Type:InternalDNS,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-37-224-194.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec29ac40ecac712560f472ac147406f5,SystemUUID:ec29ac40-ecac-7125-60f4-72ac147406f5,BootID:6aefaddb-a8fb-42ca-b933-086be838242c,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/etcdadm/etcd-manager@sha256:66a453db625abb268f4b3bbefc5a34a171d81e6e8796cecca54cfd71775c77c4 registry.k8s.io/etcdadm/etcd-manager:v3.0.20221209],SizeBytes:231502799,},ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.26.0],SizeBytes:135162323,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.26.0],SizeBytes:124991801,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.26.0],SizeBytes:57657656,},ContainerImage{Names:[registry.k8s.io/kops/kops-controller:1.27.0-alpha.1],SizeBytes:43455400,},ContainerImage{Names:[registry.k8s.io/kops/dns-controller:1.27.0-alpha.1],SizeBytes:42802033,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e 
registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[quay.io/cilium/operator@sha256:a6d24a006a6b92967ac90786b49bc1ac26e5477cf028cd1186efcfc2466484db quay.io/cilium/operator:v1.12.5],SizeBytes:26802430,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119 registry.k8s.io/sig-storage/csi-provisioner:v3.1.0],SizeBytes:23345856,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4 registry.k8s.io/sig-storage/csi-resizer:v1.4.0],SizeBytes:22381475,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b registry.k8s.io/sig-storage/csi-attacher:v3.4.0],SizeBytes:22085298,},ContainerImage{Names:[registry.k8s.io/provider-aws/cloud-controller-manager@sha256:fdeb61e3e42ecd9cca868d550ebdb88dd6341d9e91fcfa9a37e227dab2ad22cb registry.k8s.io/provider-aws/cloud-controller-manager:v1.26.0],SizeBytes:20154862,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/kops/kube-apiserver-healthcheck:1.27.0-alpha.1],SizeBytes:4967345,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:40:41.410: INFO: Logging kubelet events for node i-0ea715ad3f7d7c666 Jan 14 06:40:41.516: INFO: Logging pods the 
kubelet thinks is on node i-0ea715ad3f7d7c666 Jan 14 06:40:41.625: INFO: aws-cloud-controller-manager-8g49k started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:41.625: INFO: Container aws-cloud-controller-manager ready: true, restart count 0 Jan 14 06:40:41.625: INFO: cilium-operator-5dd44dc49f-hdhf7 started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:41.625: INFO: Container cilium-operator ready: true, restart count 0 Jan 14 06:40:41.625: INFO: etcd-manager-events-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:41.625: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:40:41.625: INFO: kops-controller-8ntms started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:41.625: INFO: Container kops-controller ready: true, restart count 0 Jan 14 06:40:41.625: INFO: kube-controller-manager-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:41.625: INFO: Container kube-controller-manager ready: true, restart count 2 Jan 14 06:40:41.625: INFO: kube-scheduler-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:41.625: INFO: Container kube-scheduler ready: true, restart count 0 Jan 14 06:40:41.625: INFO: cilium-vl5tq started at 2023-01-14 06:24:57 +0000 UTC (1+1 container statuses recorded) Jan 14 06:40:41.625: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:40:41.625: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:40:41.625: INFO: ebs-csi-node-knngk started at 2023-01-14 06:24:57 +0000 UTC (0+3 container statuses recorded) Jan 14 06:40:41.625: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:40:41.625: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:40:41.625: INFO: Container node-driver-registrar 
ready: true, restart count 0 Jan 14 06:40:41.625: INFO: dns-controller-69987775c6-66b5p started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:41.625: INFO: Container dns-controller ready: true, restart count 0 Jan 14 06:40:41.625: INFO: ebs-csi-controller-5bd98b456f-zxg2l started at 2023-01-14 06:24:57 +0000 UTC (0+5 container statuses recorded) Jan 14 06:40:41.625: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:40:41.625: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:40:41.625: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 06:40:41.625: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:40:41.625: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:40:41.625: INFO: etcd-manager-main-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:40:41.625: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:40:41.625: INFO: kube-apiserver-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+2 container statuses recorded) Jan 14 06:40:41.625: INFO: Container healthcheck ready: true, restart count 0 Jan 14 06:40:41.625: INFO: Container kube-apiserver ready: true, restart count 1 Jan 14 06:40:42.003: INFO: Latency metrics for node i-0ea715ad3f7d7c666 [DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 �[1mSTEP:�[0m Destroying namespace "pod-network-test-6406" for this suite. �[38;5;243m01/14/23 06:40:42.003�[0m
Filter through log files | View test history on testgrid
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sProxy\sversion\sv1\sshould\sproxy\sthrough\sa\sservice\sand\sa\spod\s\s\[Conformance\]$'
test/e2e/network/proxy.go:180 k8s.io/kubernetes/test/e2e/network.glob..func25.1.3() test/e2e/network/proxy.go:180 +0xab0from junit_01.xml
[BeforeEach] version v1 set up framework | framework.go:178 �[1mSTEP:�[0m Creating a kubernetes client �[38;5;243m01/14/23 06:34:02.377�[0m Jan 14 06:34:02.377: INFO: >>> kubeConfig: /root/.kube/config �[1mSTEP:�[0m Building a namespace api object, basename proxy �[38;5;243m01/14/23 06:34:02.378�[0m �[1mSTEP:�[0m Waiting for a default service account to be provisioned in namespace �[38;5;243m01/14/23 06:34:02.688�[0m �[1mSTEP:�[0m Waiting for kube-root-ca.crt to be provisioned in namespace �[38;5;243m01/14/23 06:34:02.893�[0m [BeforeEach] version v1 test/e2e/framework/metrics/init/init.go:31 [It] should proxy through a service and a pod [Conformance] test/e2e/network/proxy.go:101 �[1mSTEP:�[0m starting an echo server on multiple ports �[38;5;243m01/14/23 06:34:03.207�[0m �[1mSTEP:�[0m creating replication controller proxy-service-hs9zl in namespace proxy-1926 �[38;5;243m01/14/23 06:34:03.208�[0m I0114 06:34:03.312689 6756 runners.go:193] Created replication controller with name: proxy-service-hs9zl, namespace: proxy-1926, replica count: 1 I0114 06:34:04.463519 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:05.464276 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:06.465049 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:07.466026 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:08.467138 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:09.468268 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:10.468763 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:11.469008 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:12.469899 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:13.469996 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:14.470399 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:15.471114 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:16.472230 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:17.472361 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:18.473266 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:19.474124 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:20.474352 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:21.474872 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:22.476008 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:23.477054 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:24.477276 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:25.477661 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:26.477986 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:27.478795 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:28.478982 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:29.480153 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:30.480393 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:31.480609 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:32.481117 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:33.482226 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:34.483202 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:35.484324 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:36.485151 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:37.485929 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:38.486178 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:39.487285 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:40.487517 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:41.487869 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:42.488552 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:43.488740 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:44.488976 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:45.489196 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:46.490215 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:47.490722 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:48.491096 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:49.491236 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:50.491343 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:51.491527 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:52.491667 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:53.492017 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:54.492134 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:55.492342 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:56.493312 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:57.493977 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:58.494375 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:34:59.494634 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:00.494897 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:01.496017 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:02.496236 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:03.497246 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:04.497359 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:05.497575 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:06.498546 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:07.499270 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:08.499599 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:09.499957 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:10.500196 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:11.500273 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:12.500873 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:13.501209 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:14.501705 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:15.501926 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:16.503069 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:17.503696 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:18.503794 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:19.504005 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:20.504246 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:21.504423 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:22.504989 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:23.505168 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:24.505357 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:25.506103 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:26.506334 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:27.506810 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:28.506989 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:29.507190 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:30.507441 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:31.507882 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:32.508323 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:33.508517 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:34.509725 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:35.509969 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:36.510255 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:37.510424 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:38.510684 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:39.510908 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:40.511166 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:41.512103 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:42.512569 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:43.512808 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:44.513034 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:45.513231 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:46.513431 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:47.514358 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:48.514557 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:49.514935 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:50.515097 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:51.515519 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:52.516208 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:53.516755 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:54.516978 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:55.517283 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:56.518237 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:57.518856 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:58.519107 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:35:59.519193 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:00.519983 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:01.520209 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:02.520337 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:03.520794 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:04.521601 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:05.523230 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:06.524231 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:07.524973 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:08.526131 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:09.527232 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:10.528147 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:11.528336 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:12.529256 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:13.530345 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:14.530546 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:15.531131 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:16.531436 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:17.532048 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:18.532155 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:19.533251 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:20.534211 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:21.535267 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:22.535818 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:23.536129 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:24.537220 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:25.538271 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:26.538771 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:27.539440 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:28.539785 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:29.539985 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:30.540284 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:31.540485 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:32.541157 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:33.542142 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:34.542515 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:35.542883 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:36.543107 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:37.544069 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:38.544265 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:39.545336 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:40.546416 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:41.547374 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:42.548525 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:43.548768 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:44.548983 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:45.549189 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:46.549452 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:47.550218 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:48.550434 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:49.550672 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:50.550947 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:51.551091 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:52.551742 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:53.551952 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:54.552103 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:55.552213 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:56.553254 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:57.553969 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:58.554781 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:36:59.555063 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:00.555217 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:01.556293 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:02.556941 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:03.557176 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:04.557875 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:05.558070 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:06.558254 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:07.558993 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:08.559135 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:09.559468 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:10.559770 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:11.560070 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:12.560244 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:13.560449 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:14.560966 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:15.561403 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:16.561643 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:17.562323 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:18.563211 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:19.564415 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:20.564666 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:21.565162 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:22.565972 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:23.566310 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:24.566829 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:25.567050 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:26.567254 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:27.567444 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:28.567691 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:29.568762 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:30.569009 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:31.569605 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:32.570111 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:33.571286 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:34.572348 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:35.572553 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:36.572902 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:37.573326 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:38.574203 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:39.574411 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:40.574602 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:41.574807 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:42.575216 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:43.575435 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:44.575953 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:45.576241 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:46.576353 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:47.576731 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:48.576942 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:49.577221 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:50.578384 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:51.578619 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:52.579771 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:53.580954 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:54.581910 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:55.582799 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:56.583837 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:57.584255 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:58.584778 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:37:59.584959 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:00.585864 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:01.586876 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:02.587083 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:03.587524 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:04.588565 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:05.589544 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:06.589889 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:07.590867 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:08.591568 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:09.591894 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:10.592921 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:11.593962 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:12.595084 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:13.595519 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:14.595970 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:15.596975 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:16.598109 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:17.598782 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:18.599745 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:19.600787 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:20.601004 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:21.601488 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:22.601904 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:23.602815 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:24.603927 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:25.604265 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:26.604696 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:27.605204 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:28.605586 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:29.605976 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:30.606654 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:31.606813 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:32.607125 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:33.607679 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:34.608404 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:35.609022 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:36.610097 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:37.610810 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:38.611970 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:39.612756 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:40.613132 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:41.613721 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:42.614170 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:43.614632 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:44.614718 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:45.615710 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:46.615804 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:47.616706 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:48.617901 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:49.619189 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:50.619713 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:51.620154 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:52.620725 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:53.620849 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:54.621993 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:55.622861 6756 
runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:56.623979 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:57.625044 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:58.625540 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:38:59.625824 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:39:00.626020 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:39:01.626445 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:39:02.627164 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:39:03.627917 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:39:04.628669 6756 runners.go:193] proxy-service-hs9zl Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I0114 06:39:04.731610 6756 runners.go:193] Pod proxy-service-hs9zl-dsx96 i-0526f6963633e8375 Pending <nil> Jan 14 06:39:04.731: INFO: Unexpected error: <*errors.errorString | 0xc00056ebd0>: { s: "only 0 pods started 
out of 1", } Jan 14 06:39:04.731: FAIL: only 0 pods started out of 1 Full Stack Trace k8s.io/kubernetes/test/e2e/network.glob..func25.1.3() test/e2e/network/proxy.go:180 +0xab0 [AfterEach] version v1 test/e2e/framework/node/init/init.go:32 Jan 14 06:39:04.732: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] version v1 test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] version v1 dump namespaces | framework.go:196 �[1mSTEP:�[0m dump namespace information after failure �[38;5;243m01/14/23 06:39:04.835�[0m �[1mSTEP:�[0m Collecting events from namespace "proxy-1926". �[38;5;243m01/14/23 06:39:04.835�[0m �[1mSTEP:�[0m Found 12 events. �[38;5;243m01/14/23 06:39:04.94�[0m Jan 14 06:39:04.940: INFO: At 2023-01-14 06:34:03 +0000 UTC - event for proxy-service-hs9zl: {replication-controller } SuccessfulCreate: Created pod: proxy-service-hs9zl-dsx96 Jan 14 06:39:04.940: INFO: At 2023-01-14 06:34:03 +0000 UTC - event for proxy-service-hs9zl-dsx96: {default-scheduler } Scheduled: Successfully assigned proxy-1926/proxy-service-hs9zl-dsx96 to i-0526f6963633e8375 Jan 14 06:39:04.940: INFO: At 2023-01-14 06:34:03 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "42a2098ca9be1e8d2bc97a312e9e87d17954906fe3b5975225e6a78d95778f47": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:04.940: INFO: At 2023-01-14 06:34:19 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "49b731a2b5677c5d77c6cb9ff90c23205f785112df92b77d21a309f5efddc14d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium 
agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:04.940: INFO: At 2023-01-14 06:34:31 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f8835e3e43a29d3abbe2368189e490b3fcc7f7443e22957420a8cee71a8525a6": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:04.940: INFO: At 2023-01-14 06:34:45 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8297b6b8f681bd84f3c532180bfa8927e07dc9d4b8f3df4105963a490176ed4a": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:04.940: INFO: At 2023-01-14 06:34:57 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e7cbee16567eb4d7755d22d733cdce2ac027648b1027aa046a4988fc19ce369f": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:04.940: INFO: At 2023-01-14 06:35:09 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "298af825da7c5f2bd4f14460e7629f5bc08dd8b5d3c79f89998b26adb995cced": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:04.940: INFO: At 
2023-01-14 06:35:21 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bca267a55fe1ae417796ec346c10b69836f148c1f21ffd32af9d43c50e65af44": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:04.940: INFO: At 2023-01-14 06:35:34 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "d4adfc9b7a6f6a08a69d2e3c6f0fd5ef78d8ef25cd88547f170584af9e51572a": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:04.940: INFO: At 2023-01-14 06:35:49 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8c72263b9a408ae90545adb663bdf2b48be3b8cf8b62ffe1da0b5e08c4557213": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:04.940: INFO: At 2023-01-14 06:36:01 +0000 UTC - event for proxy-service-hs9zl-dsx96: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "20965cb3dd3f3272bcfa8005d933374b6a2ef250dba75e806a8ca3e244055627": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:05.043: INFO: POD NODE PHASE GRACE CONDITIONS Jan 14 06:39:05.043: INFO: 
proxy-service-hs9zl-dsx96 i-0526f6963633e8375 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:03 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:03 +0000 UTC ContainersNotReady containers with unready status: [proxy-service-hs9zl]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:03 +0000 UTC ContainersNotReady containers with unready status: [proxy-service-hs9zl]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:03 +0000 UTC }] Jan 14 06:39:05.043: INFO: Jan 14 06:39:05.147: INFO: Unable to fetch proxy-1926/proxy-service-hs9zl-dsx96/proxy-service-hs9zl logs: the server rejected our request for an unknown reason (get pods proxy-service-hs9zl-dsx96) Jan 14 06:39:05.251: INFO: Logging node info for node i-0526f6963633e8375 Jan 14 06:39:05.353: INFO: Node Info: &Node{ObjectMeta:{i-0526f6963633e8375 b8bbb07c-e234-4117-968a-d4f54d957b46 15086 0 2023-01-14 06:26:11 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a io.kubernetes.storage.mock/node:some-mock-node kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0526f6963633e8375 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0526f6963633e8375 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.59.50 csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-4581":"i-0526f6963633e8375","ebs.csi.aws.com":"i-0526f6963633e8375"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.4.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:38:03 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:io.kubernetes.storage.mock/node":{},"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.4.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0526f6963633e8375,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.4.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 
UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.59.50,},NodeAddress{Type:ExternalIP,Address:13.38.88.176,},NodeAddress{Type:InternalDNS,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-88-176.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec24f53d700a2e9399be7e5e2cc1e943,SystemUUID:ec24f53d-700a-2e93-99be-7e5e2cc1e943,BootID:58d231c1-9ab3-4e54-9948-319cfad92d73,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e 
registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 
registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[gcr.io/authenticated-image-pulling/alpine@sha256:7ff177862cb50c602bfe81f805969412e619c054a2bbead977d0c276988aa4a0 gcr.io/authenticated-image-pulling/alpine:3.7],SizeBytes:2110879,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:39:05.354: INFO: Logging kubelet events for node i-0526f6963633e8375 Jan 14 06:39:05.458: INFO: Logging pods the kubelet thinks is on node i-0526f6963633e8375 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6xkx2 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-kvr9w started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: false, restart count 0 Jan 14 06:39:05.573: INFO: pod-subpath-test-preprovisionedpv-2gt8 started at 2023-01-14 06:37:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container test-container-subpath-preprovisionedpv-2gt8 ready: 
false, restart count 0 Jan 14 06:39:05.573: INFO: csi-mockplugin-0 started at 2023-01-14 06:33:36 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:05.573: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:39:05.573: INFO: Container driver-registrar ready: true, restart count 0 Jan 14 06:39:05.573: INFO: Container mock ready: true, restart count 0 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-crc8k started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:05.573: INFO: netserver-0 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container webserver ready: true, restart count 0 Jan 14 06:39:05.573: INFO: security-context-9dbfb503-c405-47ac-9294-6ea974357a5f started at 2023-01-14 06:38:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container test-container ready: false, restart count 0 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-66vns started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:05.573: INFO: proxy-service-hs9zl-dsx96 started at 2023-01-14 06:34:03 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container proxy-service-hs9zl ready: false, restart count 0 Jan 14 06:39:05.573: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:33:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8z92 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:05.573: INFO: pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8 started at 2023-01-14 06:34:15 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container nginx ready: false, restart count 0 Jan 14 06:39:05.573: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:39:05.573: INFO: ebs-csi-node-r8qfk started at 2023-01-14 06:26:12 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:05.573: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:05.573: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:05.573: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:05.573: INFO: hostexec-i-0526f6963633e8375-kpqf6 started at 2023-01-14 06:33:54 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-mh56b started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:05.573: INFO: update-demo-nautilus-mrn8r started at 2023-01-14 06:38:46 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container update-demo ready: false, restart count 0 Jan 14 06:39:05.573: INFO: cilium-tv25q started at 2023-01-14 06:26:12 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:05.573: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:05.573: INFO: pod-subpath-test-preprovisionedpv-r2t6 started at 2023-01-14 06:34:12 +0000 UTC (0+2 container statuses 
recorded) Jan 14 06:39:05.573: INFO: Container test-container-subpath-preprovisionedpv-r2t6 ready: false, restart count 0 Jan 14 06:39:05.573: INFO: Container test-container-volume-preprovisionedpv-r2t6 ready: false, restart count 0 Jan 14 06:39:05.573: INFO: csi-mockplugin-0 started at 2023-01-14 06:35:25 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:05.573: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:05.573: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:39:05.573: INFO: Container mock ready: false, restart count 0 Jan 14 06:39:05.573: INFO: bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb started at 2023-01-14 06:38:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb ready: false, restart count 0 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-p4cww started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:05.573: INFO: hostexec-i-0526f6963633e8375-7mnst started at 2023-01-14 06:32:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-v4q7f started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-l2cw9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:05.573: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ts46q started at 2023-01-14 06:33:13 +0000 UTC 
(0+1 container statuses recorded) Jan 14 06:39:05.573: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:05.983: INFO: Latency metrics for node i-0526f6963633e8375 Jan 14 06:39:05.983: INFO: Logging node info for node i-06bd219a44e00580c Jan 14 06:39:06.087: INFO: Node Info: &Node{ObjectMeta:{i-06bd219a44e00580c c2a57daf-87e6-4c31-ab8d-158cf1752c85 14461 0 2023-01-14 06:26:09 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-06bd219a44e00580c kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-06bd219a44e00580c topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.61.252 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-06bd219a44e00580c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} 
{kops-controller Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.3.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:35:28 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:35:31 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.3.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-06bd219a44e00580c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.3.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: 
{{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:21 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.61.252,},NodeAddress{Type:ExternalIP,Address:15.237.110.205,},NodeAddress{Type:InternalDNS,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-110-205.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec28b615c9f51208890a610e546cafd1,SystemUUID:ec28b615-c9f5-1208-890a-610e546cafd1,BootID:9cfe2407-336f-468c-b599-1b87cbc71140,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e 
registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 
registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847 kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4 kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4,DevicePath:,},},Config:nil,},} Jan 14 06:39:06.087: INFO: Logging kubelet events for node i-06bd219a44e00580c Jan 14 06:39:06.192: INFO: Logging pods the kubelet thinks is on node i-06bd219a44e00580c Jan 14 06:39:06.308: INFO: cilium-k6c6s started at 2023-01-14 06:26:10 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:06.308: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q2x2n started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: pod-disruption-failure-ignore-1-crpgt started at 2023-01-14 06:34:01 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container c 
ready: false, restart count 0 Jan 14 06:39:06.308: INFO: ebs-csi-node-62qzb started at 2023-01-14 06:26:10 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:06.308: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:06.308: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:06.308: INFO: netserver-1 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container webserver ready: true, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k776q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-gsthr started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: inline-volume-tester-v5nnb started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:39:06.308: INFO: pod-disruption-failure-ignore-1-5xwhv started at 2023-01-14 06:34:05 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container c ready: false, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-cf86c started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: inline-volume-tester-2fmpv started at 2023-01-14 06:34:58 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container csi-volume-tester 
ready: false, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-jzs4v started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: pod-disruption-failure-ignore-1-frbqx started at 2023-01-14 06:34:08 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container c ready: false, restart count 0 Jan 14 06:39:06.308: INFO: test-ss-0 started at 2023-01-14 06:34:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container webserver ready: false, restart count 0 Jan 14 06:39:06.308: INFO: pod-disruption-failure-ignore-0-dbk78 started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container c ready: true, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dbmt8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ztghd started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: update-demo-nautilus-wzc78 started at 2023-01-14 06:38:46 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container update-demo ready: false, restart count 0 Jan 14 06:39:06.308: INFO: var-expansion-5d68387b-eca3-4e4a-a1bc-06454f2a1ada started at 2023-01-14 06:35:23 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container dapi-container ready: false, restart count 0 Jan 14 06:39:06.308: INFO: pod-disruption-failure-ignore-1-f5m4k started at 2023-01-14 06:33:56 +0000 UTC (0+1 
container statuses recorded) Jan 14 06:39:06.308: INFO: Container c ready: false, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rlwrq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: pod-disruption-failure-ignore-1-xjff6 started at 2023-01-14 06:34:27 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container c ready: false, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6nwrq started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: pod-disruption-failure-ignore-1-6gwgt started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container c ready: false, restart count 0 Jan 14 06:39:06.308: INFO: pod-disruption-failure-ignore-1-ghlhh started at 2023-01-14 06:34:12 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container c ready: false, restart count 0 Jan 14 06:39:06.308: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zbhxz started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:06.308: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:06.308: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:13 +0000 UTC (0+7 container statuses recorded) Jan 14 06:39:06.308: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:39:06.308: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:06.308: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:39:06.308: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:39:06.308: 
INFO: Container hostpath ready: false, restart count 0 Jan 14 06:39:06.308: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:39:06.308: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:39:06.705: INFO: Latency metrics for node i-06bd219a44e00580c Jan 14 06:39:06.705: INFO: Logging node info for node i-0930a50194a147b36 Jan 14 06:39:06.808: INFO: Node Info: &Node{ObjectMeta:{i-0930a50194a147b36 4316b3c5-1eeb-4ee2-9818-40f99d51117d 14934 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0930a50194a147b36 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0930a50194a147b36 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.36.60 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0930a50194a147b36"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 
{"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.1.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:37:39 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.1.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0930a50194a147b36,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.1.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 
DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:19 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.36.60,},NodeAddress{Type:ExternalIP,Address:15.237.49.122,},NodeAddress{Type:InternalDNS,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-49-122.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2ac5dccb44f409fdc575df19a0b9a7,SystemUUID:ec2ac5dc-cb44-f409-fdc5-75df19a0b9a7,BootID:9dff06f2-e51d-4b5e-a657-e8f546eded95,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:68d396900aeaa072c1f27289485fdac29834045a6f3ffe369bf389d830ef572d registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.6],SizeBytes:20293261,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:39:06.808: INFO: Logging kubelet events for node i-0930a50194a147b36 Jan 14 06:39:06.913: INFO: Logging pods the kubelet thinks is on node i-0930a50194a147b36 Jan 14 06:39:07.025: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dw26q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:37:43 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:39:07.025: INFO: 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-bb7qt started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: cilium-75rxm started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:07.025: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:07.025: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g4xd9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: ebs-csi-node-rpzft started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:07.025: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:07.025: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:07.025: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g6wnp started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zjjvx started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-pkvpm started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: 
coredns-559769c974-5xkn6 started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container coredns ready: true, restart count 0 Jan 14 06:39:07.025: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-slt2z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: netserver-2 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container webserver ready: false, restart count 0 Jan 14 06:39:07.025: INFO: csi-mockplugin-0 started at 2023-01-14 06:37:42 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:07.025: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:07.025: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:39:07.025: INFO: Container mock ready: false, restart count 0 Jan 14 06:39:07.025: INFO: coredns-autoscaler-7cb5c5b969-svc7j started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container autoscaler ready: true, restart count 0 Jan 14 06:39:07.025: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rfg9z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-xxqp8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.025: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q9h4x started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.025: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, 
restart count 0 Jan 14 06:39:07.529: INFO: Latency metrics for node i-0930a50194a147b36 Jan 14 06:39:07.529: INFO: Logging node info for node i-095cd924e787c9946 Jan 14 06:39:07.633: INFO: Node Info: &Node{ObjectMeta:{i-095cd924e787c9946 7ac98e5e-c131-42e0-a67e-ba9b45d163a4 15074 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-095cd924e787c9946 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-095cd924e787c9946 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.51.27 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-095cd924e787c9946"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.2.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:38:00 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.2.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-095cd924e787c9946,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.2.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:20 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.51.27,},NodeAddress{Type:ExternalIP,Address:13.38.27.88,},NodeAddress{Type:InternalDNS,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-27-88.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2449c051467854b20245f8e87294d1,SystemUUID:ec2449c0-5146-7854-b202-45f8e87294d1,BootID:11ed24c0-6b48-4372-960a-a4095c73f4ca,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/sample-apiserver@sha256:8d70890151aa5d096f331cb9da1b9cd5be0412b7363fe67b5c3befdcaa2a28d0 registry.k8s.io/e2e-test-images/sample-apiserver:1.17.7],SizeBytes:25667066,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:39:07.633: INFO: Logging kubelet events for node i-095cd924e787c9946 Jan 14 06:39:07.739: INFO: Logging pods the kubelet thinks is on node i-095cd924e787c9946 Jan 14 06:39:07.849: INFO: ebs-csi-node-q6j9r started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:07.849: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:07.849: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:07.849: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-hghxq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: 
INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.849: INFO: hostexec-i-095cd924e787c9946-xb47h started at 2023-01-14 06:33:55 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:39:07.849: INFO: cilium-kpqdf started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:07.849: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:07.849: INFO: netserver-3 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container webserver ready: true, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-qnwnv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.849: INFO: csi-mockplugin-0 started at 2023-01-14 06:33:29 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:07.849: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container mock ready: false, restart count 0 Jan 14 06:39:07.849: INFO: pod-subpath-test-preprovisionedpv-mhww started at 2023-01-14 06:34:11 +0000 UTC (2+2 container statuses recorded) Jan 14 06:39:07.849: INFO: Init container init-volume-preprovisionedpv-mhww ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Init container test-init-subpath-preprovisionedpv-mhww ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container test-container-subpath-preprovisionedpv-mhww ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container test-container-volume-preprovisionedpv-mhww ready: false, restart count 0 Jan 
14 06:39:07.849: INFO: pod-subpath-test-preprovisionedpv-d4th started at 2023-01-14 06:34:13 +0000 UTC (0+2 container statuses recorded) Jan 14 06:39:07.849: INFO: Container test-container-subpath-preprovisionedpv-d4th ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container test-container-volume-preprovisionedpv-d4th ready: false, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8pkw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-7bdwh started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: false, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-94lp8 started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.849: INFO: test-pod started at 2023-01-14 06:38:30 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container webserver ready: false, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-d9fvw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-8x9bt started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ppzfj started at 2023-01-14 06:33:13 
+0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.849: INFO: coredns-559769c974-lpb2c started at 2023-01-14 06:26:42 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container coredns ready: true, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k5vzv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:07.849: INFO: hostexec-i-095cd924e787c9946-c5rkj started at 2023-01-14 06:33:59 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:39:07.849: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:34 +0000 UTC (0+7 container statuses recorded) Jan 14 06:39:07.849: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:39:07.849: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:39:07.849: INFO: csi-mockplugin-resizer-0 started at 2023-01-14 06:33:29 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 06:39:07.849: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-79v7d started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:07.849: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, 
restart count 0 Jan 14 06:39:08.283: INFO: Latency metrics for node i-095cd924e787c9946 Jan 14 06:39:08.283: INFO: Logging node info for node i-0ea715ad3f7d7c666 Jan 14 06:39:08.386: INFO: Node Info: &Node{ObjectMeta:{i-0ea715ad3f7d7c666 1b9ffdb4-6e31-4298-bf35-45383b8cddd4 14687 0 2023-01-14 06:24:19 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:c5.large beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kops.k8s.io/kops-controller-pki: kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0ea715ad3f7d7c666 kubernetes.io/os:linux node-role.kubernetes.io/control-plane: node.kubernetes.io/exclude-from-external-load-balancers: node.kubernetes.io/instance-type:c5.large topology.ebs.csi.aws.com/zone:eu-west-3a topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.43.108 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0ea715ad3f7d7c666"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2023-01-14 06:24:19 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {protokube Update v1 2023-01-14 06:24:47 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kops.k8s.io/kops-controller-pki":{},"f:node-role.kubernetes.io/control-plane":{},"f:node.kubernetes.io/exclude-from-external-load-balancers":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:25:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.0.0/24\"":{}}}} } {aws-cloud-controller-manager 
Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:taints":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kubelet Update v1 2023-01-14 06:36:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.0.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0ea715ad3f7d7c666,Unschedulable:false,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/control-plane,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[100.96.0.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3892301824 0} {<nil>} 3801076Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} 
{<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3787444224 0} {<nil>} 3698676Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:25:06 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.43.108,},NodeAddress{Type:ExternalIP,Address:13.37.224.194,},NodeAddress{Type:InternalDNS,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-37-224-194.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec29ac40ecac712560f472ac147406f5,SystemUUID:ec29ac40-ecac-7125-60f4-72ac147406f5,BootID:6aefaddb-a8fb-42ca-b933-086be838242c,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/etcdadm/etcd-manager@sha256:66a453db625abb268f4b3bbefc5a34a171d81e6e8796cecca54cfd71775c77c4 registry.k8s.io/etcdadm/etcd-manager:v3.0.20221209],SizeBytes:231502799,},ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.26.0],SizeBytes:135162323,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.26.0],SizeBytes:124991801,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.26.0],SizeBytes:57657656,},ContainerImage{Names:[registry.k8s.io/kops/kops-controller:1.27.0-alpha.1],SizeBytes:43455400,},ContainerImage{Names:[registry.k8s.io/kops/dns-controller:1.27.0-alpha.1],SizeBytes:42802033,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e 
registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[quay.io/cilium/operator@sha256:a6d24a006a6b92967ac90786b49bc1ac26e5477cf028cd1186efcfc2466484db quay.io/cilium/operator:v1.12.5],SizeBytes:26802430,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119 registry.k8s.io/sig-storage/csi-provisioner:v3.1.0],SizeBytes:23345856,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4 registry.k8s.io/sig-storage/csi-resizer:v1.4.0],SizeBytes:22381475,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b registry.k8s.io/sig-storage/csi-attacher:v3.4.0],SizeBytes:22085298,},ContainerImage{Names:[registry.k8s.io/provider-aws/cloud-controller-manager@sha256:fdeb61e3e42ecd9cca868d550ebdb88dd6341d9e91fcfa9a37e227dab2ad22cb registry.k8s.io/provider-aws/cloud-controller-manager:v1.26.0],SizeBytes:20154862,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/kops/kube-apiserver-healthcheck:1.27.0-alpha.1],SizeBytes:4967345,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:39:08.386: INFO: Logging kubelet events for node i-0ea715ad3f7d7c666 Jan 14 06:39:08.491: INFO: Logging pods the 
kubelet thinks is on node i-0ea715ad3f7d7c666 Jan 14 06:39:08.599: INFO: etcd-manager-events-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:08.599: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:39:08.599: INFO: kops-controller-8ntms started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:08.599: INFO: Container kops-controller ready: true, restart count 0 Jan 14 06:39:08.599: INFO: aws-cloud-controller-manager-8g49k started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:08.599: INFO: Container aws-cloud-controller-manager ready: true, restart count 0 Jan 14 06:39:08.599: INFO: cilium-operator-5dd44dc49f-hdhf7 started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:08.599: INFO: Container cilium-operator ready: true, restart count 0 Jan 14 06:39:08.599: INFO: ebs-csi-node-knngk started at 2023-01-14 06:24:57 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:08.599: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:08.599: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:08.599: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:08.599: INFO: dns-controller-69987775c6-66b5p started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:08.599: INFO: Container dns-controller ready: true, restart count 0 Jan 14 06:39:08.599: INFO: ebs-csi-controller-5bd98b456f-zxg2l started at 2023-01-14 06:24:57 +0000 UTC (0+5 container statuses recorded) Jan 14 06:39:08.599: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:39:08.599: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:39:08.599: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 06:39:08.599: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:08.599: INFO: Container 
liveness-probe ready: true, restart count 0 Jan 14 06:39:08.599: INFO: etcd-manager-main-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:08.599: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:39:08.599: INFO: kube-apiserver-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+2 container statuses recorded) Jan 14 06:39:08.599: INFO: Container healthcheck ready: true, restart count 0 Jan 14 06:39:08.599: INFO: Container kube-apiserver ready: true, restart count 1 Jan 14 06:39:08.599: INFO: kube-controller-manager-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:08.599: INFO: Container kube-controller-manager ready: true, restart count 2 Jan 14 06:39:08.599: INFO: kube-scheduler-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:08.599: INFO: Container kube-scheduler ready: true, restart count 0 Jan 14 06:39:08.599: INFO: cilium-vl5tq started at 2023-01-14 06:24:57 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:08.599: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:08.599: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:08.980: INFO: Latency metrics for node i-0ea715ad3f7d7c666 [DeferCleanup (Each)] version v1 tear down framework | framework.go:193 �[1mSTEP:�[0m Destroying namespace "proxy-1926" for this suite. �[38;5;243m01/14/23 06:39:08.98�[0m
Filter through log files | View test history on testgrid
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-node\]\sPreStop\sgraceful\spod\sterminated\sshould\swait\suntil\spreStop\shook\scompletes\sthe\sprocess$'
test/e2e/node/pre_stop.go:182 k8s.io/kubernetes/test/e2e/node.glob..func11.3() test/e2e/node/pre_stop.go:182 +0x11cfrom junit_01.xml
[BeforeEach] [sig-node] PreStop set up framework | framework.go:178 �[1mSTEP:�[0m Creating a kubernetes client �[38;5;243m01/14/23 06:34:14.956�[0m Jan 14 06:34:14.957: INFO: >>> kubeConfig: /root/.kube/config �[1mSTEP:�[0m Building a namespace api object, basename prestop �[38;5;243m01/14/23 06:34:14.958�[0m �[1mSTEP:�[0m Waiting for a default service account to be provisioned in namespace �[38;5;243m01/14/23 06:34:15.273�[0m �[1mSTEP:�[0m Waiting for kube-root-ca.crt to be provisioned in namespace �[38;5;243m01/14/23 06:34:15.482�[0m [BeforeEach] [sig-node] PreStop test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-node] PreStop test/e2e/node/pre_stop.go:159 [It] graceful pod terminated should wait until preStop hook completes the process test/e2e/node/pre_stop.go:172 �[1mSTEP:�[0m creating the pod �[38;5;243m01/14/23 06:34:15.69�[0m �[1mSTEP:�[0m submitting the pod to kubernetes �[38;5;243m01/14/23 06:34:15.69�[0m �[1mSTEP:�[0m waiting for pod running �[38;5;243m01/14/23 06:34:15.798�[0m Jan 14 06:34:15.798: INFO: Waiting up to 5m0s for pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8" in namespace "prestop-232" to be "running" Jan 14 06:34:15.905: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 106.114402ms Jan 14 06:34:18.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.21161352s Jan 14 06:34:20.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4.211506646s Jan 14 06:34:22.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 6.210945152s Jan 14 06:34:24.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.211351133s Jan 14 06:34:26.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 10.211212393s Jan 14 06:34:28.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 12.211606188s Jan 14 06:34:30.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 14.211296887s Jan 14 06:34:32.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 16.210702749s Jan 14 06:34:34.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 18.211115113s Jan 14 06:34:36.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 20.211393108s Jan 14 06:34:38.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 22.211553227s Jan 14 06:34:40.011: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 24.212711437s Jan 14 06:34:42.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 26.210681723s Jan 14 06:34:44.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 28.211661046s Jan 14 06:34:46.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 30.211345518s Jan 14 06:34:48.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 32.211044794s Jan 14 06:34:50.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 34.211305064s Jan 14 06:34:52.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 36.211615039s Jan 14 06:34:54.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 38.211213551s Jan 14 06:34:56.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 40.211514914s Jan 14 06:34:58.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 42.210899085s Jan 14 06:35:00.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 44.21145573s Jan 14 06:35:02.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 46.211011192s Jan 14 06:35:04.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 48.210816795s Jan 14 06:35:06.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 50.211615856s Jan 14 06:35:08.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 52.211711954s Jan 14 06:35:10.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 54.211677965s Jan 14 06:35:12.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 56.211746329s Jan 14 06:35:14.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 58.211618186s Jan 14 06:35:16.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.211617407s Jan 14 06:35:18.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.211427587s Jan 14 06:35:20.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.211128613s Jan 14 06:35:22.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.210781508s Jan 14 06:35:24.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.211476648s Jan 14 06:35:26.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.211498601s Jan 14 06:35:28.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.211503267s Jan 14 06:35:30.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.211727641s Jan 14 06:35:32.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.210657445s Jan 14 06:35:34.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.21130721s Jan 14 06:35:36.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m20.211124437s Jan 14 06:35:38.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.211492694s Jan 14 06:35:40.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.211091923s Jan 14 06:35:42.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.210551626s Jan 14 06:35:44.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.211163291s Jan 14 06:35:46.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.210489361s Jan 14 06:35:48.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.211389874s Jan 14 06:35:50.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.211099925s Jan 14 06:35:52.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.210988519s Jan 14 06:35:54.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.211602826s Jan 14 06:35:56.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.211282594s Jan 14 06:35:58.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.211543982s Jan 14 06:36:00.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m44.211301958s Jan 14 06:36:02.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.211212615s Jan 14 06:36:04.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.211720695s Jan 14 06:36:06.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.211280657s Jan 14 06:36:08.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m52.211057123s Jan 14 06:36:10.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.211590355s Jan 14 06:36:12.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.211592207s Jan 14 06:36:14.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.211479018s Jan 14 06:36:16.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.211879741s Jan 14 06:36:18.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m2.211850665s Jan 14 06:36:20.011: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m4.212427682s Jan 14 06:36:22.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m6.21095041s Jan 14 06:36:24.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m8.211413284s Jan 14 06:36:26.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m10.211494985s Jan 14 06:36:28.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m12.211356786s Jan 14 06:36:30.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m14.21146201s Jan 14 06:36:32.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m16.211479454s Jan 14 06:36:34.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m18.211149444s Jan 14 06:36:36.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m20.210781658s Jan 14 06:36:38.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m22.210815277s Jan 14 06:36:40.013: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m24.214499003s Jan 14 06:36:42.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m26.210678613s Jan 14 06:36:44.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m28.211319577s Jan 14 06:36:46.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m30.211514931s Jan 14 06:36:48.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m32.210534317s Jan 14 06:36:50.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m34.211304079s Jan 14 06:36:52.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m36.210737785s Jan 14 06:36:54.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m38.211616935s Jan 14 06:36:56.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m40.211867904s Jan 14 06:36:58.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m42.211616369s Jan 14 06:37:00.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m44.211342817s Jan 14 06:37:02.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m46.211430425s Jan 14 06:37:04.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m48.21169575s Jan 14 06:37:06.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m50.211154825s Jan 14 06:37:08.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m52.211256083s Jan 14 06:37:10.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m54.211355836s Jan 14 06:37:12.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m56.21158701s Jan 14 06:37:14.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 2m58.211677286s Jan 14 06:37:16.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m0.211306438s Jan 14 06:37:18.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m2.210916428s Jan 14 06:37:20.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m4.211129636s Jan 14 06:37:22.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m6.210812521s Jan 14 06:37:24.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m8.211667141s Jan 14 06:37:26.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m10.211139047s Jan 14 06:37:28.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m12.211538554s Jan 14 06:37:30.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m14.211302674s Jan 14 06:37:32.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m16.211485238s Jan 14 06:37:34.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m18.211361293s Jan 14 06:37:36.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m20.211414688s Jan 14 06:37:38.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m22.211774351s Jan 14 06:37:40.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m24.211636116s Jan 14 06:37:42.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m26.210522377s Jan 14 06:37:44.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m28.211310292s Jan 14 06:37:46.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m30.211595224s Jan 14 06:37:48.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m32.211422884s Jan 14 06:37:50.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m34.211392187s Jan 14 06:37:52.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m36.211395099s Jan 14 06:37:54.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m38.210753293s Jan 14 06:37:56.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m40.211354177s Jan 14 06:37:58.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m42.211147389s Jan 14 06:38:00.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m44.211183989s Jan 14 06:38:02.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m46.210799331s Jan 14 06:38:04.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m48.211638045s Jan 14 06:38:06.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m50.211460829s Jan 14 06:38:08.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m52.211555493s Jan 14 06:38:10.011: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m54.212231716s Jan 14 06:38:12.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m56.210610186s Jan 14 06:38:14.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 3m58.211143068s Jan 14 06:38:16.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m0.210760772s Jan 14 06:38:18.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m2.211489693s Jan 14 06:38:20.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m4.21140165s Jan 14 06:38:22.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m6.211770721s Jan 14 06:38:24.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m8.211533591s Jan 14 06:38:26.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m10.21157347s Jan 14 06:38:28.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m12.211475624s Jan 14 06:38:30.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m14.21120515s Jan 14 06:38:32.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m16.21073148s Jan 14 06:38:34.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m18.211524153s Jan 14 06:38:36.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m20.211290075s Jan 14 06:38:38.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m22.211315943s Jan 14 06:38:40.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m24.211267686s Jan 14 06:38:42.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m26.211003504s Jan 14 06:38:44.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m28.211485661s Jan 14 06:38:46.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m30.21116518s Jan 14 06:38:48.012: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m32.213443889s Jan 14 06:38:50.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m34.211377799s Jan 14 06:38:52.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m36.21112624s Jan 14 06:38:54.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m38.211307755s Jan 14 06:38:56.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m40.21115598s Jan 14 06:38:58.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m42.211467312s Jan 14 06:39:00.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m44.211653918s Jan 14 06:39:02.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m46.210613877s Jan 14 06:39:04.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m48.211357038s Jan 14 06:39:06.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m50.211456931s Jan 14 06:39:08.017: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m52.218981601s Jan 14 06:39:10.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m54.210799482s Jan 14 06:39:12.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m56.210927444s Jan 14 06:39:14.009: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 4m58.211068771s Jan 14 06:39:16.010: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.211696271s Jan 14 06:39:16.115: INFO: Pod "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.316425879s Jan 14 06:39:16.116: INFO: Unexpected error: <*pod.timeoutError | 0xc003c1b6e0>: { msg: "timed out while waiting for pod prestop-232/pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8 to be running", observedObjects: [ <*v1.Pod | 0xc000077200>{ TypeMeta: {Kind: "", APIVersion: ""}, ObjectMeta: { Name: "pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8", GenerateName: "", Namespace: "prestop-232", SelfLink: "", UID: "46187fc0-ee36-431f-842d-b1a0af9092c1", ResourceVersion: "13786", Generation: 0, CreationTimestamp: { Time: { wall: 0, ext: 63809274855, loc: { name: "Local", zone: [ {name: "UTC", offset: 0, isDST: false}, ], tx: [ { when: -576460752303423488, index: 0, isstd: false, isutc: false, }, ], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: "UTC", offset: 0, isDST: false}, }, }, }, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, Annotations: nil, OwnerReferences: nil, Finalizers: nil, ManagedFields: [ { Manager: "e2e.test", Operation: "Update", APIVersion: "v1", Time: { Time: { wall: 0, ext: 63809274855, loc: { name: "Local", zone: [...], tx: [...], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: ..., offset: ..., isDST: ...}, }, }, }, FieldsType: "FieldsV1", FieldsV1: { Raw: 
"{\"f:spec\":{\"f:containers\":{\"k:{\\\"name\\\":\\\"nginx\\\"}\":{\".\":{},\"f:image\":{},\"f:imagePullPolicy\":{},\"f:lifecycle\":{\".\":{},\"f:preStop\":{\".\":{},\"f:exec\":{\".\":{},\"f:command\":{}}}},\"f:name\":{},\"f:resources\":{},\"f:terminationMessagePath\":{},\"f:terminationMessagePolicy\":{}}},\"f:dnsPolicy\":{},\"f:enableServiceLinks\":{},\"f:restartPolicy\":{},\"f:schedulerName\":{},\"f:securityContext\":{},\"f:terminationGracePeriodSeconds\":{}}}", }, Subresource: "", }, { Manager: "kubelet", Operation: "Update", APIVersion: "v1", Time: { Time: { wall: 0, ext: 63809274855, loc: { name: "Local", zone: [...], tx: [...], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: ..., offset: ..., isDST: ...}, }, }, }, FieldsType: "FieldsV1", FieldsV1: { Raw: "{\"f:status\":{\"f:conditions\":{\"k:{\\\"type\\\":\\\"ContainersReady\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:message\":{},\"f:reason\":{},\"f:status\":{},\"f:type\":{}},\"k:{\\\"type\\\":\\\"Initialized\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:status\":{},\"f:type\":{}},\"k:{\\\"type\\\":\\\"Ready\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:message\":{},\"f:reason\":{},\"f:status\":{},\"f:type\":{}}},\"f:containerStatuses\":{},\"f:hostIP\":{},\"f:startTime\":{}}}", }, Subresource: "status", }, ], }, Spec: { Volumes: [ { Name: "kube-api-access-77rp2", VolumeSource: { HostPath: nil, EmptyDir: nil, GCEPersistentDisk: nil, AWSElasticBlockStore: nil, GitRepo: nil, Secret: nil, NFS: nil, ISCSI: nil, Glusterfs: nil, PersistentVolumeClaim: nil, RBD: nil, FlexVolume: nil, Cinder: nil, CephFS: nil, Flocker: nil, DownwardAPI: nil, FC: nil, AzureFile: nil, ConfigMap: nil, VsphereVolume: nil, Quobyte: nil, AzureDisk: nil, PhotonPersistentDisk: nil, Projected: { Sources: [ { Secret: ..., DownwardAPI: ..., ConfigMap: ..., ServiceAccountToken: ..., }, { Secret: ..., DownwardAPI: 
..., ConfigMap: ..., ServiceAccountToken: ..., }, { Secret: ..., DownwardAPI: ..., ConfigMap: ..., ServiceAccountToken: ..., }, ], DefaultMode: 420, }, Portworx... Gomega truncated this representation as it exceeds 'format.MaxLength'. Consider having the object provide a custom 'GomegaStringer' representation or adjust the parameters in Gomega's 'format' package. Learn more here: https://onsi.github.io/gomega/#adjusting-output Jan 14 06:39:16.116: FAIL: timed out while waiting for pod prestop-232/pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8 to be running Full Stack Trace k8s.io/kubernetes/test/e2e/node.glob..func11.3() test/e2e/node/pre_stop.go:182 +0x11c [AfterEach] [sig-node] PreStop test/e2e/framework/node/init/init.go:32 Jan 14 06:39:16.116: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] PreStop test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] PreStop dump namespaces | framework.go:196 �[1mSTEP:�[0m dump namespace information after failure �[38;5;243m01/14/23 06:39:16.222�[0m �[1mSTEP:�[0m Collecting events from namespace "prestop-232". �[38;5;243m01/14/23 06:39:16.222�[0m �[1mSTEP:�[0m Found 11 events. 
�[38;5;243m01/14/23 06:39:16.327�[0m Jan 14 06:39:16.327: INFO: At 2023-01-14 06:34:15 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {default-scheduler } Scheduled: Successfully assigned prestop-232/pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8 to i-0526f6963633e8375 Jan 14 06:39:16.327: INFO: At 2023-01-14 06:34:16 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "1297461bb5955c6b70ff0ff8e1d08a4e4603462be37a28719cce605c6b10cdac": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.327: INFO: At 2023-01-14 06:34:27 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "1b6229c5f60aeccef42cf4ab75ca3f2be722eaad08f0e13e5e7f072912da68bb": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.327: INFO: At 2023-01-14 06:34:38 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "26e34e79761f0b1f7615de9f3278b29eb5c71f3f0766b15b3698220c32a5054d": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.327: INFO: At 2023-01-14 06:34:50 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: 
Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5c82077e43a79035804f8197af15126237fe6b7cc34757490260ae0e9289933f": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.327: INFO: At 2023-01-14 06:35:05 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "6d7c403d9bca9b8afc3ff941f424075ea8f700cd5ae719ee4ebce5f0e7d70c9f": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.327: INFO: At 2023-01-14 06:35:19 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "12f594a537c98014d30cf076dc91bcd038e30cd6c5cfa0ca85c21bcb6f7bd192": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.327: INFO: At 2023-01-14 06:35:31 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c838509ce2c4e2c636031bd97756b3e26d173cb1d7d558991b9c04a8fbd69699": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.327: INFO: At 2023-01-14 06:35:46 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} 
FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "2619b828f5433c74a5117f287c0b23c7e9779d2846911b0248a0ab5624fa2602": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.327: INFO: At 2023-01-14 06:35:59 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "fea682855f9a5db30fbf34d6b33d891dbf54f52b1d4ec7c8d2d7face99798936": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.327: INFO: At 2023-01-14 06:36:14 +0000 UTC - event for pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8: {kubelet i-0526f6963633e8375} FailedCreatePodSandBox: (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4ae8004c7e7b4e0d1e89d6226c951fac7e0d09b14e2d4bfa62e798c7a0630f0e": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:39:16.432: INFO: POD NODE PHASE GRACE CONDITIONS Jan 14 06:39:16.432: INFO: pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8 i-0526f6963633e8375 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:15 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:15 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:15 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-01-14 06:34:15 +0000 UTC }] Jan 14 
06:39:16.432: INFO: Jan 14 06:39:16.541: INFO: Unable to fetch prestop-232/pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8/nginx logs: the server rejected our request for an unknown reason (get pods pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8) Jan 14 06:39:16.646: INFO: Logging node info for node i-0526f6963633e8375 Jan 14 06:39:16.750: INFO: Node Info: &Node{ObjectMeta:{i-0526f6963633e8375 b8bbb07c-e234-4117-968a-d4f54d957b46 15086 0 2023-01-14 06:26:11 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a io.kubernetes.storage.mock/node:some-mock-node kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0526f6963633e8375 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0526f6963633e8375 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.59.50 csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-4581":"i-0526f6963633e8375","ebs.csi.aws.com":"i-0526f6963633e8375"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 
{"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.4.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:38:03 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:io.kubernetes.storage.mock/node":{},"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} 
status}]},Spec:NodeSpec{PodCIDR:100.96.4.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0526f6963633e8375,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.4.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:11 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:38:03 +0000 UTC,LastTransitionTime:2023-01-14 06:26:23 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.59.50,},NodeAddress{Type:ExternalIP,Address:13.38.88.176,},NodeAddress{Type:InternalDNS,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0526f6963633e8375.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-88-176.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec24f53d700a2e9399be7e5e2cc1e943,SystemUUID:ec24f53d-700a-2e93-99be-7e5e2cc1e943,BootID:58d231c1-9ab3-4e54-9948-319cfad92d73,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 
registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[gcr.io/authenticated-image-pulling/alpine@sha256:7ff177862cb50c602bfe81f805969412e619c054a2bbead977d0c276988aa4a0 
gcr.io/authenticated-image-pulling/alpine:3.7],SizeBytes:2110879,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:39:16.751: INFO: Logging kubelet events for node i-0526f6963633e8375 Jan 14 06:39:16.858: INFO: Logging pods the kubelet thinks is on node i-0526f6963633e8375 Jan 14 06:39:16.972: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-v4q7f started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-l2cw9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-p4cww started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: hostexec-i-0526f6963633e8375-7mnst started at 2023-01-14 06:32:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: 
Container agnhost-container ready: true, restart count 0 Jan 14 06:39:16.972: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ts46q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-kvr9w started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: false, restart count 0 Jan 14 06:39:16.972: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6xkx2 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: csi-mockplugin-0 started at 2023-01-14 06:33:36 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:16.972: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:39:16.972: INFO: Container driver-registrar ready: true, restart count 0 Jan 14 06:39:16.972: INFO: Container mock ready: true, restart count 0 Jan 14 06:39:16.972: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-crc8k started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: netserver-0 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container webserver ready: true, restart count 0 Jan 14 06:39:16.972: INFO: pod-subpath-test-preprovisionedpv-2gt8 started at 2023-01-14 06:37:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container test-container-subpath-preprovisionedpv-2gt8 ready: false, restart count 0 Jan 14 06:39:16.972: INFO: 
cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-66vns started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:33:36 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:39:16.972: INFO: security-context-9dbfb503-c405-47ac-9294-6ea974357a5f started at 2023-01-14 06:38:37 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container test-container ready: false, restart count 0 Jan 14 06:39:16.972: INFO: pod-prestop-hook-9eff2a9c-a633-41f2-beb1-eea670f0bec8 started at 2023-01-14 06:34:15 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container nginx ready: false, restart count 0 Jan 14 06:39:16.972: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:39:16.972: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8z92 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: hostexec-i-0526f6963633e8375-kpqf6 started at 2023-01-14 06:33:54 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:39:16.972: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-mh56b started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: update-demo-nautilus-mrn8r started at 2023-01-14 06:38:46 +0000 UTC (0+1 container statuses 
recorded) Jan 14 06:39:16.972: INFO: Container update-demo ready: false, restart count 0 Jan 14 06:39:16.972: INFO: ebs-csi-node-r8qfk started at 2023-01-14 06:26:12 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:16.972: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:16.972: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:16.972: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:16.972: INFO: csi-mockplugin-0 started at 2023-01-14 06:35:25 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:16.972: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:16.972: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:39:16.972: INFO: Container mock ready: false, restart count 0 Jan 14 06:39:16.972: INFO: bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb started at 2023-01-14 06:38:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Container bin-false7432aa71-39a5-4494-a6d9-966c24fb52fb ready: false, restart count 0 Jan 14 06:39:16.972: INFO: cilium-tv25q started at 2023-01-14 06:26:12 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:16.972: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:16.972: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:17.397: INFO: Latency metrics for node i-0526f6963633e8375 Jan 14 06:39:17.397: INFO: Logging node info for node i-06bd219a44e00580c Jan 14 06:39:17.502: INFO: Node Info: &Node{ObjectMeta:{i-06bd219a44e00580c c2a57daf-87e6-4c31-ab8d-158cf1752c85 14461 0 2023-01-14 06:26:09 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-06bd219a44e00580c kubernetes.io/os:linux 
node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-06bd219a44e00580c topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.61.252 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-06bd219a44e00580c"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.3.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:12 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:35:28 +0000 UTC FieldsV1 {"f:status":{"f:volumesAttached":{}}} status} {kubelet Update v1 2023-01-14 06:35:31 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.3.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-06bd219a44e00580c,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.3.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 
UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:09 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:35:31 +0000 UTC,LastTransitionTime:2023-01-14 06:26:21 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.61.252,},NodeAddress{Type:ExternalIP,Address:15.237.110.205,},NodeAddress{Type:InternalDNS,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-06bd219a44e00580c.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-110-205.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec28b615c9f51208890a610e546cafd1,SystemUUID:ec28b615-c9f5-1208-890a-610e546cafd1,BootID:9cfe2407-336f-468c-b599-1b87cbc71140,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e 
registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e 
registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847 kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4 kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-00276aca89d701847,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-0cdf851187e561b92,DevicePath:,},AttachedVolume{Name:kubernetes.io/csi/ebs.csi.aws.com^vol-010cadf2aee76ead4,DevicePath:,},},Config:nil,},} Jan 14 06:39:17.503: INFO: Logging kubelet events for node i-06bd219a44e00580c Jan 14 06:39:17.610: INFO: Logging pods the kubelet thinks is on node i-06bd219a44e00580c Jan 14 06:39:17.727: INFO: pod-disruption-failure-ignore-1-frbqx started at 2023-01-14 06:34:08 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container c ready: false, restart count 0 Jan 14 06:39:17.727: INFO: test-ss-0 started at 2023-01-14 06:34:36 +0000 UTC (0+1 container statuses recorded) Jan 
14 06:39:17.727: INFO: Container webserver ready: false, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-jzs4v started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: hostexec-i-06bd219a44e00580c-vkgh8 started at 2023-01-14 06:39:10 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container agnhost-container ready: true, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dbmt8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: pod-disruption-failure-ignore-0-dbk78 started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container c ready: true, restart count 0 Jan 14 06:39:17.727: INFO: var-expansion-5d68387b-eca3-4e4a-a1bc-06454f2a1ada started at 2023-01-14 06:35:23 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container dapi-container ready: false, restart count 0 Jan 14 06:39:17.727: INFO: pod-disruption-failure-ignore-1-f5m4k started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container c ready: false, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rlwrq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ztghd started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: 
true, restart count 0 Jan 14 06:39:17.727: INFO: update-demo-nautilus-wzc78 started at 2023-01-14 06:38:46 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container update-demo ready: false, restart count 0 Jan 14 06:39:17.727: INFO: pod-disruption-failure-ignore-1-xjff6 started at 2023-01-14 06:34:27 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container c ready: false, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-6nwrq started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: pod-disruption-failure-ignore-1-6gwgt started at 2023-01-14 06:33:22 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container c ready: false, restart count 0 Jan 14 06:39:17.727: INFO: pod-db681d65-3e7a-406a-aa51-b57c4ce4869e started at 2023-01-14 06:39:09 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container test-container ready: false, restart count 0 Jan 14 06:39:17.727: INFO: pod-disruption-failure-ignore-1-ghlhh started at 2023-01-14 06:34:12 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container c ready: false, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zbhxz started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:13 +0000 UTC (0+7 container statuses recorded) Jan 14 06:39:17.727: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:39:17.727: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:17.727: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 
06:39:17.727: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:39:17.727: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:39:17.727: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:39:17.727: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:39:17.727: INFO: cilium-k6c6s started at 2023-01-14 06:26:10 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:17.727: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q2x2n started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: ebs-csi-node-62qzb started at 2023-01-14 06:26:10 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:17.727: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:17.727: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:17.727: INFO: netserver-1 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container webserver ready: true, restart count 0 Jan 14 06:39:17.727: INFO: liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 started at 2023-01-14 06:39:09 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container agnhost-container ready: false, restart count 0 Jan 14 06:39:17.727: INFO: pod-disruption-failure-ignore-1-crpgt started at 2023-01-14 06:34:01 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container c ready: false, restart count 0 Jan 14 06:39:17.727: INFO: pod-edd099ae-7d89-4db8-9a86-b238ae68aba9 started at 2023-01-14 06:39:15 +0000 UTC (0+1 container 
statuses recorded) Jan 14 06:39:17.727: INFO: Container write-pod ready: false, restart count 0 Jan 14 06:39:17.727: INFO: pod-disruption-failure-ignore-1-5xwhv started at 2023-01-14 06:34:05 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container c ready: false, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-cf86c started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: inline-volume-tester-2fmpv started at 2023-01-14 06:34:58 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k776q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-gsthr started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:17.727: INFO: inline-volume-tester-v5nnb started at 2023-01-14 06:35:25 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:17.727: INFO: Container csi-volume-tester ready: false, restart count 0 Jan 14 06:39:18.145: INFO: Latency metrics for node i-06bd219a44e00580c Jan 14 06:39:18.145: INFO: Logging node info for node i-0930a50194a147b36 Jan 14 06:39:18.250: INFO: Node Info: &Node{ObjectMeta:{i-0930a50194a147b36 4316b3c5-1eeb-4ee2-9818-40f99d51117d 14934 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux 
failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0930a50194a147b36 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-0930a50194a147b36 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.36.60 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0930a50194a147b36"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.1.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:37:39 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.1.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0930a50194a147b36,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.1.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051689472 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946831872 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 
UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:37:39 +0000 UTC,LastTransitionTime:2023-01-14 06:26:19 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.36.60,},NodeAddress{Type:ExternalIP,Address:15.237.49.122,},NodeAddress{Type:InternalDNS,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0930a50194a147b36.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-15-237-49-122.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2ac5dccb44f409fdc575df19a0b9a7,SystemUUID:ec2ac5dc-cb44-f409-fdc5-75df19a0b9a7,BootID:9dff06f2-e51d-4b5e-a657-e8f546eded95,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e 
registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:68d396900aeaa072c1f27289485fdac29834045a6f3ffe369bf389d830ef572d 
registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.6],SizeBytes:20293261,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:39:18.250: INFO: Logging kubelet events for node i-0930a50194a147b36 Jan 14 
06:39:18.359: INFO: Logging pods the kubelet thinks is on node i-0930a50194a147b36 Jan 14 06:39:18.469: INFO: cilium-75rxm started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:18.469: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-bb7qt started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g4xd9 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: ebs-csi-node-rpzft started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:18.469: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:18.469: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:18.469: INFO: coredns-559769c974-5xkn6 started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container coredns ready: true, restart count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-g6wnp started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-zjjvx started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart 
count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-pkvpm started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-slt2z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: coredns-autoscaler-7cb5c5b969-svc7j started at 2023-01-14 06:26:19 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container autoscaler ready: true, restart count 0 Jan 14 06:39:18.469: INFO: netserver-2 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container webserver ready: false, restart count 0 Jan 14 06:39:18.469: INFO: csi-mockplugin-0 started at 2023-01-14 06:37:42 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:18.469: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:18.469: INFO: Container driver-registrar ready: false, restart count 0 Jan 14 06:39:18.469: INFO: Container mock ready: false, restart count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-rfg9z started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-xxqp8 started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-q9h4x started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) 
Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-dw26q started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:18.469: INFO: csi-mockplugin-attacher-0 started at 2023-01-14 06:37:43 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:18.469: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:39:18.921: INFO: Latency metrics for node i-0930a50194a147b36 Jan 14 06:39:18.921: INFO: Logging node info for node i-095cd924e787c9946 Jan 14 06:39:19.026: INFO: Node Info: &Node{ObjectMeta:{i-095cd924e787c9946 7ac98e5e-c131-42e0-a67e-ba9b45d163a4 15074 0 2023-01-14 06:26:06 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:t3.medium beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kubelet_cleanup:true kubernetes.io/arch:amd64 kubernetes.io/hostname:i-095cd924e787c9946 kubernetes.io/os:linux node-role.kubernetes.io/node: node.kubernetes.io/instance-type:t3.medium topology.ebs.csi.aws.com/zone:eu-west-3a topology.hostpath.csi/node:i-095cd924e787c9946 topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.51.27 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-095cd924e787c9946"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.2.0/24\"":{}}}} } {kubelet Update v1 2023-01-14 06:26:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kops-controller Update v1 2023-01-14 06:26:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:node-role.kubernetes.io/node":{}}}} } {e2e.test Update v1 2023-01-14 06:33:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kubelet_cleanup":{}}}} } {kubelet Update v1 2023-01-14 06:38:00 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{},"f:topology.hostpath.csi/node":{}}},"f:status":{"f:allocatable":{"f:ephemeral-storage":{}},"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} 
status}]},Spec:NodeSpec{PodCIDR:100.96.2.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-095cd924e787c9946,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[100.96.2.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{4051681280 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3946823680 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:38:00 +0000 UTC,LastTransitionTime:2023-01-14 06:26:20 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.51.27,},NodeAddress{Type:ExternalIP,Address:13.38.27.88,},NodeAddress{Type:InternalDNS,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-095cd924e787c9946.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-38-27-88.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec2449c051467854b20245f8e87294d1,SystemUUID:ec2449c0-5146-7854-b202-45f8e87294d1,BootID:11ed24c0-6b48-4372-960a-a4095c73f4ca,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 
registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/sample-apiserver@sha256:8d70890151aa5d096f331cb9da1b9cd5be0412b7363fe67b5c3befdcaa2a28d0 registry.k8s.io/e2e-test-images/sample-apiserver:1.17.7],SizeBytes:25667066,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:017727efcfeb7d053af68e51436ce8e65edbc6ca573720afb4f79c8594036955 registry.k8s.io/coredns/coredns:v1.10.0],SizeBytes:15273057,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:39:19.026: INFO: Logging kubelet events for node i-095cd924e787c9946 Jan 14 06:39:19.133: INFO: Logging pods the kubelet thinks is on node i-095cd924e787c9946 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-qnwnv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:19.245: INFO: csi-mockplugin-0 started at 2023-01-14 06:33:29 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:19.245: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:19.245: INFO: Container driver-registrar ready: false, restart 
count 0 Jan 14 06:39:19.245: INFO: Container mock ready: false, restart count 0 Jan 14 06:39:19.245: INFO: cilium-kpqdf started at 2023-01-14 06:26:07 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:19.245: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:19.245: INFO: netserver-3 started at 2023-01-14 06:33:56 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container webserver ready: true, restart count 0 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-7bdwh started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: false, restart count 0 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-94lp8 started at 2023-01-14 06:33:14 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:19.245: INFO: test-pod started at 2023-01-14 06:38:30 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container webserver ready: false, restart count 0 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k8pkw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-d9fvw started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-8x9bt started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: 
Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-ppzfj started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:19.245: INFO: csi-hostpathplugin-0 started at 2023-01-14 06:34:34 +0000 UTC (0+7 container statuses recorded) Jan 14 06:39:19.245: INFO: Container csi-attacher ready: false, restart count 0 Jan 14 06:39:19.245: INFO: Container csi-provisioner ready: false, restart count 0 Jan 14 06:39:19.245: INFO: Container csi-resizer ready: false, restart count 0 Jan 14 06:39:19.245: INFO: Container csi-snapshotter ready: false, restart count 0 Jan 14 06:39:19.245: INFO: Container hostpath ready: false, restart count 0 Jan 14 06:39:19.245: INFO: Container liveness-probe ready: false, restart count 0 Jan 14 06:39:19.245: INFO: Container node-driver-registrar ready: false, restart count 0 Jan 14 06:39:19.245: INFO: coredns-559769c974-lpb2c started at 2023-01-14 06:26:42 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container coredns ready: true, restart count 0 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-k5vzv started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-79v7d started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:19.245: INFO: csi-mockplugin-resizer-0 started at 2023-01-14 06:33:29 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 
06:39:19.245: INFO: ebs-csi-node-q6j9r started at 2023-01-14 06:26:07 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:19.245: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:19.245: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:19.245: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:19.245: INFO: cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe-hghxq started at 2023-01-14 06:33:13 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:19.245: INFO: Container cleanup40-1f08417c-5ea7-4258-b2bd-e72a6bef6ffe ready: true, restart count 0 Jan 14 06:39:19.793: INFO: Latency metrics for node i-095cd924e787c9946 Jan 14 06:39:19.793: INFO: Logging node info for node i-0ea715ad3f7d7c666 Jan 14 06:39:19.898: INFO: Node Info: &Node{ObjectMeta:{i-0ea715ad3f7d7c666 1b9ffdb4-6e31-4298-bf35-45383b8cddd4 14687 0 2023-01-14 06:24:19 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:c5.large beta.kubernetes.io/os:linux failure-domain.beta.kubernetes.io/region:eu-west-3 failure-domain.beta.kubernetes.io/zone:eu-west-3a kops.k8s.io/kops-controller-pki: kubernetes.io/arch:amd64 kubernetes.io/hostname:i-0ea715ad3f7d7c666 kubernetes.io/os:linux node-role.kubernetes.io/control-plane: node.kubernetes.io/exclude-from-external-load-balancers: node.kubernetes.io/instance-type:c5.large topology.ebs.csi.aws.com/zone:eu-west-3a topology.kubernetes.io/region:eu-west-3 topology.kubernetes.io/zone:eu-west-3a] map[alpha.kubernetes.io/provided-node-ip:172.20.43.108 csi.volume.kubernetes.io/nodeid:{"ebs.csi.aws.com":"i-0ea715ad3f7d7c666"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2023-01-14 06:24:19 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:alpha.kubernetes.io/provided-node-ip":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}} } {protokube Update v1 2023-01-14 06:24:47 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:kops.k8s.io/kops-controller-pki":{},"f:node-role.kubernetes.io/control-plane":{},"f:node.kubernetes.io/exclude-from-external-load-balancers":{}}}} } {kube-controller-manager Update v1 2023-01-14 06:25:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"100.96.0.0/24\"":{}}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{"f:beta.kubernetes.io/instance-type":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:taints":{}}} } {aws-cloud-controller-manager Update v1 2023-01-14 06:25:14 +0000 UTC FieldsV1 {"f:status":{"f:addresses":{"k:{\"type\":\"ExternalDNS\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"ExternalIP\"}":{".":{},"f:address":{},"f:type":{}},"k:{\"type\":\"Hostname\"}":{"f:address":{}},"k:{\"type\":\"InternalDNS\"}":{".":{},"f:address":{},"f:type":{}}}}} status} {kubelet Update v1 2023-01-14 06:36:33 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.ebs.csi.aws.com/zone":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:100.96.0.0/24,DoNotUseExternalID:,ProviderID:aws:///eu-west-3a/i-0ea715ad3f7d7c666,Unschedulable:false,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/control-plane,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[100.96.0.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{49753808896 0} {<nil>} BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3892301824 0} {<nil>} 3801076Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{44778427933 0} {<nil>} 44778427933 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3787444224 0} {<nil>} 3698676Ki BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:24:16 +0000 
UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2023-01-14 06:36:33 +0000 UTC,LastTransitionTime:2023-01-14 06:25:06 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.20.43.108,},NodeAddress{Type:ExternalIP,Address:13.37.224.194,},NodeAddress{Type:InternalDNS,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:Hostname,Address:i-0ea715ad3f7d7c666.eu-west-3.compute.internal,},NodeAddress{Type:ExternalDNS,Address:ec2-13-37-224-194.eu-west-3.compute.amazonaws.com,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ec29ac40ecac712560f472ac147406f5,SystemUUID:ec29ac40-ecac-7125-60f4-72ac147406f5,BootID:6aefaddb-a8fb-42ca-b933-086be838242c,KernelVersion:5.15.0-1026-aws,OSImage:Ubuntu 20.04.5 LTS,ContainerRuntimeVersion:containerd://1.6.15,KubeletVersion:v1.26.0,KubeProxyVersion:v1.26.0,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/etcdadm/etcd-manager@sha256:66a453db625abb268f4b3bbefc5a34a171d81e6e8796cecca54cfd71775c77c4 registry.k8s.io/etcdadm/etcd-manager:v3.0.20221209],SizeBytes:231502799,},ContainerImage{Names:[quay.io/cilium/cilium@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5 
quay.io/cilium/cilium:v1.12.5],SizeBytes:166719855,},ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.26.0],SizeBytes:135162323,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.26.0],SizeBytes:124991801,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.26.0],SizeBytes:67205320,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.26.0],SizeBytes:57657656,},ContainerImage{Names:[registry.k8s.io/kops/kops-controller:1.27.0-alpha.1],SizeBytes:43455400,},ContainerImage{Names:[registry.k8s.io/kops/dns-controller:1.27.0-alpha.1],SizeBytes:42802033,},ContainerImage{Names:[registry.k8s.io/provider-aws/aws-ebs-csi-driver@sha256:f0c5de192d832e7c1daa6580d4a62e8fa6fc8eabc0917ae4cb7ed4d15e95b59e registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.14.1],SizeBytes:29725845,},ContainerImage{Names:[quay.io/cilium/operator@sha256:a6d24a006a6b92967ac90786b49bc1ac26e5477cf028cd1186efcfc2466484db quay.io/cilium/operator:v1.12.5],SizeBytes:26802430,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119 registry.k8s.io/sig-storage/csi-provisioner:v3.1.0],SizeBytes:23345856,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4 registry.k8s.io/sig-storage/csi-resizer:v1.4.0],SizeBytes:22381475,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b registry.k8s.io/sig-storage/csi-attacher:v3.4.0],SizeBytes:22085298,},ContainerImage{Names:[registry.k8s.io/provider-aws/cloud-controller-manager@sha256:fdeb61e3e42ecd9cca868d550ebdb88dd6341d9e91fcfa9a37e227dab2ad22cb registry.k8s.io/provider-aws/cloud-controller-manager:v1.26.0],SizeBytes:20154862,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:406f59599991916d2942d8d02f076d957ed71b541ee19f09fc01723a6e6f5932 registry.k8s.io/sig-storage/livenessprobe:v2.6.0],SizeBytes:8240918,},ContainerImage{Names:[registry.k8s.io/kops/kube-apiserver-healthcheck:1.27.0-alpha.1],SizeBytes:4967345,},ContainerImage{Names:[registry.k8s.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db registry.k8s.io/pause:3.6],SizeBytes:301773,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Jan 14 06:39:19.898: INFO: Logging kubelet events for node i-0ea715ad3f7d7c666 Jan 14 06:39:20.007: INFO: Logging pods the kubelet thinks is on node i-0ea715ad3f7d7c666 Jan 14 06:39:20.124: INFO: ebs-csi-node-knngk started at 2023-01-14 06:24:57 +0000 UTC (0+3 container statuses recorded) Jan 14 06:39:20.124: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:20.124: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:20.124: INFO: Container node-driver-registrar ready: true, restart count 0 Jan 14 06:39:20.124: INFO: dns-controller-69987775c6-66b5p started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:20.124: INFO: Container dns-controller ready: true, restart count 0 Jan 14 06:39:20.124: INFO: ebs-csi-controller-5bd98b456f-zxg2l started at 2023-01-14 06:24:57 +0000 UTC (0+5 container statuses recorded) Jan 14 06:39:20.124: INFO: Container csi-attacher ready: true, restart count 0 Jan 14 06:39:20.124: INFO: Container csi-provisioner ready: true, restart count 0 Jan 14 06:39:20.124: INFO: Container csi-resizer ready: true, restart count 0 Jan 14 06:39:20.124: INFO: Container ebs-plugin ready: true, restart count 0 Jan 14 06:39:20.124: INFO: Container liveness-probe ready: true, restart count 0 Jan 14 06:39:20.124: INFO: etcd-manager-main-i-0ea715ad3f7d7c666 started at 2023-01-14 
06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:20.124: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:39:20.124: INFO: kube-apiserver-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+2 container statuses recorded) Jan 14 06:39:20.124: INFO: Container healthcheck ready: true, restart count 0 Jan 14 06:39:20.124: INFO: Container kube-apiserver ready: true, restart count 1 Jan 14 06:39:20.124: INFO: kube-controller-manager-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:20.124: INFO: Container kube-controller-manager ready: true, restart count 2 Jan 14 06:39:20.124: INFO: kube-scheduler-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:20.124: INFO: Container kube-scheduler ready: true, restart count 0 Jan 14 06:39:20.124: INFO: cilium-vl5tq started at 2023-01-14 06:24:57 +0000 UTC (1+1 container statuses recorded) Jan 14 06:39:20.124: INFO: Init container clean-cilium-state ready: true, restart count 0 Jan 14 06:39:20.124: INFO: Container cilium-agent ready: true, restart count 0 Jan 14 06:39:20.124: INFO: etcd-manager-events-i-0ea715ad3f7d7c666 started at 2023-01-14 06:23:47 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:20.124: INFO: Container etcd-manager ready: true, restart count 0 Jan 14 06:39:20.124: INFO: kops-controller-8ntms started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:20.124: INFO: Container kops-controller ready: true, restart count 0 Jan 14 06:39:20.124: INFO: aws-cloud-controller-manager-8g49k started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:20.124: INFO: Container aws-cloud-controller-manager ready: true, restart count 0 Jan 14 06:39:20.124: INFO: cilium-operator-5dd44dc49f-hdhf7 started at 2023-01-14 06:24:57 +0000 UTC (0+1 container statuses recorded) Jan 14 06:39:20.124: INFO: Container 
cilium-operator ready: true, restart count 0 Jan 14 06:39:20.586: INFO: Latency metrics for node i-0ea715ad3f7d7c666 [DeferCleanup (Each)] [sig-node] PreStop tear down framework | framework.go:193 �[1mSTEP:�[0m Destroying namespace "prestop-232" for this suite. �[38;5;243m01/14/23 06:39:20.586�[0m
Filter through log files | View test history on testgrid
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-node\]\sProbing\scontainer\sshould\shave\smonotonically\sincreasing\srestart\scount\s\[NodeConformance\]\s\[Conformance\]$'
test/e2e/common/node/container_probe.go:955 k8s.io/kubernetes/test/e2e/common/node.RunLivenessTest(0xc000b942d0, 0xc002f72480, 0x5, 0xc0014db240?) test/e2e/common/node/container_probe.go:955 +0x39a k8s.io/kubernetes/test/e2e/common/node.glob..func2.8() test/e2e/common/node/container_probe.go:207 +0x117
[BeforeEach] [sig-node] Probing container set up framework | framework.go:178 �[1mSTEP:�[0m Creating a kubernetes client �[38;5;243m01/14/23 06:39:09.107�[0m Jan 14 06:39:09.108: INFO: >>> kubeConfig: /root/.kube/config �[1mSTEP:�[0m Building a namespace api object, basename container-probe �[38;5;243m01/14/23 06:39:09.109�[0m �[1mSTEP:�[0m Waiting for a default service account to be provisioned in namespace �[38;5;243m01/14/23 06:39:09.423�[0m �[1mSTEP:�[0m Waiting for kube-root-ca.crt to be provisioned in namespace �[38;5;243m01/14/23 06:39:09.629�[0m [BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-node] Probing container test/e2e/common/node/container_probe.go:63 [It] should have monotonically increasing restart count [NodeConformance] [Conformance] test/e2e/common/node/container_probe.go:199 �[1mSTEP:�[0m Creating pod liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 in namespace container-probe-9484 �[38;5;243m01/14/23 06:39:09.836�[0m Jan 14 06:39:09.944: INFO: Waiting up to 5m0s for pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1" in namespace "container-probe-9484" to be "not pending" Jan 14 06:39:10.048: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 103.560187ms Jan 14 06:39:12.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.20826197s Jan 14 06:39:14.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4.207960235s Jan 14 06:39:16.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.208027777s Jan 14 06:39:18.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.207796414s Jan 14 06:39:20.156: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 10.211841511s Jan 14 06:39:22.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 12.207816597s Jan 14 06:39:24.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 14.207958891s Jan 14 06:39:26.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 16.207602916s Jan 14 06:39:28.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 18.208439866s Jan 14 06:39:30.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 20.208312962s Jan 14 06:39:32.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 22.208412452s Jan 14 06:39:34.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 24.207753426s Jan 14 06:39:36.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 26.208249675s Jan 14 06:39:38.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 28.207478129s Jan 14 06:39:40.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 30.207536813s Jan 14 06:39:42.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 32.208710055s Jan 14 06:39:44.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 34.207438985s Jan 14 06:39:46.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 36.208036421s Jan 14 06:39:48.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 38.208872621s Jan 14 06:39:50.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 40.207870548s Jan 14 06:39:52.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 42.208256796s Jan 14 06:39:54.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 44.208436861s Jan 14 06:39:56.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 46.208342379s Jan 14 06:39:58.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 48.208033525s Jan 14 06:40:00.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 50.208277944s Jan 14 06:40:02.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 52.208563586s Jan 14 06:40:04.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 54.208036952s Jan 14 06:40:06.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 56.208095814s Jan 14 06:40:08.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 58.207615464s Jan 14 06:40:10.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m0.208333427s Jan 14 06:40:12.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.208125411s Jan 14 06:40:14.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.208691567s Jan 14 06:40:16.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.208205598s Jan 14 06:40:18.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.208078262s Jan 14 06:40:20.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.208149484s Jan 14 06:40:22.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.208363427s Jan 14 06:40:24.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.208397624s Jan 14 06:40:26.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.208267802s Jan 14 06:40:28.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.207669247s Jan 14 06:40:30.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.20838352s Jan 14 06:40:32.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.20848383s Jan 14 06:40:34.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.207955999s Jan 14 06:40:36.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m26.208058018s Jan 14 06:40:38.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.208292047s Jan 14 06:40:40.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.207750579s Jan 14 06:40:42.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.208694834s Jan 14 06:40:44.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.207778253s Jan 14 06:40:46.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.208140425s Jan 14 06:40:48.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.207666157s Jan 14 06:40:50.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.208022456s Jan 14 06:40:52.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.208236929s Jan 14 06:40:54.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.20837416s Jan 14 06:40:56.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.208366349s Jan 14 06:40:58.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.207779453s Jan 14 06:41:00.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.208593544s Jan 14 06:41:02.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m52.208340235s Jan 14 06:41:04.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.208189241s Jan 14 06:41:06.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.208035605s Jan 14 06:41:08.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.208246937s Jan 14 06:41:10.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.20824319s Jan 14 06:41:12.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m2.208433061s Jan 14 06:41:14.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m4.208487428s Jan 14 06:41:16.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m6.208238632s Jan 14 06:41:18.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m8.208012094s Jan 14 06:41:20.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m10.208148718s Jan 14 06:41:22.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m12.208225995s Jan 14 06:41:24.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m14.20847124s Jan 14 06:41:26.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m16.208200661s Jan 14 06:41:28.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m18.207609724s Jan 14 06:41:30.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m20.208455472s Jan 14 06:41:32.162: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m22.218335053s Jan 14 06:41:34.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m24.207968729s Jan 14 06:41:36.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m26.207961763s Jan 14 06:41:38.155: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m28.210945726s Jan 14 06:41:40.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m30.208115591s Jan 14 06:41:42.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m32.208161168s Jan 14 06:41:44.151: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m34.207266908s Jan 14 06:41:46.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m36.207837804s Jan 14 06:41:48.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m38.208104855s Jan 14 06:41:50.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m40.208020409s Jan 14 06:41:52.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m42.208029466s Jan 14 06:41:54.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m44.20820917s Jan 14 06:41:56.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m46.207695014s Jan 14 06:41:58.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m48.207715162s Jan 14 06:42:00.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m50.207666145s Jan 14 06:42:02.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m52.208161075s Jan 14 06:42:04.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m54.208565758s Jan 14 06:42:06.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m56.208037407s Jan 14 06:42:08.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 2m58.207455071s Jan 14 06:42:10.173: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m0.228719924s Jan 14 06:42:12.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m2.208220942s Jan 14 06:42:14.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m4.208458213s Jan 14 06:42:16.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m6.208415633s Jan 14 06:42:18.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m8.207990523s Jan 14 06:42:20.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m10.207519253s Jan 14 06:42:22.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m12.208101126s Jan 14 06:42:24.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m14.208789085s Jan 14 06:42:26.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m16.208062245s Jan 14 06:42:28.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m18.207411729s Jan 14 06:42:30.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m20.20792063s Jan 14 06:42:32.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m22.208046932s Jan 14 06:42:34.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m24.208005419s Jan 14 06:42:36.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m26.207884677s Jan 14 06:42:38.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m28.207873008s Jan 14 06:42:40.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m30.20789722s Jan 14 06:42:42.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m32.207949557s Jan 14 06:42:44.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m34.207773833s Jan 14 06:42:46.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m36.207984849s Jan 14 06:42:48.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m38.207908938s Jan 14 06:42:50.158: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m40.214046904s Jan 14 06:42:52.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m42.207623784s Jan 14 06:42:54.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m44.208165478s Jan 14 06:42:56.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m46.208378182s Jan 14 06:42:58.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m48.207931311s Jan 14 06:43:00.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m50.208190236s Jan 14 06:43:02.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m52.208188186s Jan 14 06:43:04.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m54.208232875s Jan 14 06:43:06.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m56.20787494s Jan 14 06:43:08.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 3m58.208225167s Jan 14 06:43:10.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m0.208480112s Jan 14 06:43:12.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m2.208504605s Jan 14 06:43:14.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m4.208740655s Jan 14 06:43:16.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m6.208539407s Jan 14 06:43:18.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m8.20797613s Jan 14 06:43:20.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m10.20812869s Jan 14 06:43:22.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m12.208169806s Jan 14 06:43:24.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m14.208330799s Jan 14 06:43:26.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m16.208512242s Jan 14 06:43:28.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m18.208564929s Jan 14 06:43:30.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m20.208212238s Jan 14 06:43:32.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m22.208291016s Jan 14 06:43:34.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m24.208353552s Jan 14 06:43:36.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m26.208126339s Jan 14 06:43:38.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m28.20776142s Jan 14 06:43:40.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m30.20774061s Jan 14 06:43:42.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m32.207869338s Jan 14 06:43:44.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m34.208098398s Jan 14 06:43:46.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m36.208367368s Jan 14 06:43:48.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m38.207975948s Jan 14 06:43:50.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m40.208440118s Jan 14 06:43:52.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m42.208156116s Jan 14 06:43:54.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m44.208250974s Jan 14 06:43:56.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m46.208253655s Jan 14 06:43:58.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m48.208063563s Jan 14 06:44:00.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m50.208312809s Jan 14 06:44:02.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m52.208770402s Jan 14 06:44:04.152: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m54.208096017s Jan 14 06:44:06.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m56.208803586s Jan 14 06:44:08.154: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 4m58.21029448s Jan 14 06:44:10.153: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.208406866s Jan 14 06:44:10.256: INFO: Pod "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.312085924s Jan 14 06:44:10.257: INFO: Unexpected error: starting pod liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 in namespace container-probe-9484: <*pod.timeoutError | 0xc000c35590>: { msg: "timed out while waiting for pod container-probe-9484/liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 to be not pending", observedObjects: [ <*v1.Pod | 0xc0023bf680>{ TypeMeta: {Kind: "", APIVersion: ""}, ObjectMeta: { Name: "liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1", GenerateName: "", Namespace: "container-probe-9484", SelfLink: "", UID: "590d60fc-4e60-40a6-9aac-a1a074d7d388", ResourceVersion: "15498", Generation: 0, CreationTimestamp: { Time: { wall: 0, ext: 63809275149, loc: { name: "Local", zone: [ {name: "UTC", offset: 0, isDST: false}, ], tx: [ { when: -576460752303423488, index: 0, isstd: false, isutc: false, }, ], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: "UTC", offset: 0, isDST: false}, }, }, }, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: {"test": "liveness"}, Annotations: nil, OwnerReferences: nil, Finalizers: nil, ManagedFields: [ { Manager: "e2e.test", Operation: "Update", APIVersion: "v1", Time: { Time: { wall: 0, ext: 63809275149, loc: { name: "Local", zone: [...], tx: [...], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: ..., offset: 
..., isDST: ...}, }, }, }, FieldsType: "FieldsV1", FieldsV1: { Raw: "{\"f:metadata\":{\"f:labels\":{\".\":{},\"f:test\":{}}},\"f:spec\":{\"f:containers\":{\"k:{\\\"name\\\":\\\"agnhost-container\\\"}\":{\".\":{},\"f:args\":{},\"f:image\":{},\"f:imagePullPolicy\":{},\"f:livenessProbe\":{\".\":{},\"f:failureThreshold\":{},\"f:httpGet\":{\".\":{},\"f:path\":{},\"f:port\":{},\"f:scheme\":{}},\"f:initialDelaySeconds\":{},\"f:periodSeconds\":{},\"f:successThreshold\":{},\"f:timeoutSeconds\":{}},\"f:name\":{},\"f:resources\":{},\"f:securityContext\":{},\"f:terminationMessagePath\":{},\"f:terminationMessagePolicy\":{}}},\"f:dnsPolicy\":{},\"f:enableServiceLinks\":{},\"f:restartPolicy\":{},\"f:schedulerName\":{},\"f:securityContext\":{},\"f:terminationGracePeriodSeconds\":{}}}", }, Subresource: "", }, { Manager: "kubelet", Operation: "Update", APIVersion: "v1", Time: { Time: { wall: 0, ext: 63809275149, loc: { name: "Local", zone: [...], tx: [...], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: ..., offset: ..., isDST: ...}, }, }, }, FieldsType: "FieldsV1", FieldsV1: { Raw: "{\"f:status\":{\"f:conditions\":{\"k:{\\\"type\\\":\\\"ContainersReady\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:message\":{},\"f:reason\":{},\"f:status\":{},\"f:type\":{}},\"k:{\\\"type\\\":\\\"Initialized\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:status\":{},\"f:type\":{}},\"k:{\\\"type\\\":\\\"Ready\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:message\":{},\"f:reason\":{},\"f:status\":{},\"f:type\":{}}},\"f:containerStatuses\":{},\"f:hostIP\":{},\"f:startTime\":{}}}", }, Subresource: "status", }, ], }, Spec: { Volumes: [ { Name: "kube-api-access-wd8t4", VolumeSource: { HostPath: nil, EmptyDir: nil, GCEPersistentDisk: nil, AWSElasticBlockStore: nil, GitRepo: nil, Secret: nil, NFS: nil, ISCSI: nil, Glusterfs: nil, PersistentVolumeClaim: nil, RBD: nil, FlexVolume: 
nil, Cinder: nil, CephFS: nil, Flocker: nil, DownwardAPI: nil, FC: nil, AzureFile: nil, ConfigMap: nil, VsphereVolume: nil, Quobyte: nil, AzureDisk: nil, PhotonPersistentDisk: nil, Projected: { Sources: [ { Secret: ..., DownwardAPI: ..., ConfigMap: ..., ServiceAccountToken: ..., }, { Secret: ..., DownwardAPI: ..., ConfigMap: ..., ServiceAccountToken: ..., }, { Secret: ..., DownwardAPI: ..., ConfigMap: ..., ... Gomega truncated this representation as it exceeds 'format.MaxLength'. Consider having the object provide a custom 'GomegaStringer' representation or adjust the parameters in Gomega's 'format' package. Learn more here: https://onsi.github.io/gomega/#adjusting-output Jan 14 06:44:10.257: FAIL: starting pod liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 in namespace container-probe-9484: timed out while waiting for pod container-probe-9484/liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 to be not pending Full Stack Trace k8s.io/kubernetes/test/e2e/common/node.RunLivenessTest(0xc000b942d0, 0xc002f72480, 0x5, 0xc0014db240?) test/e2e/common/node/container_probe.go:955 +0x39a k8s.io/kubernetes/test/e2e/common/node.glob..func2.8() test/e2e/common/node/container_probe.go:207 +0x117 �[1mSTEP:�[0m deleting the pod �[38;5;243m01/14/23 06:44:10.257�[0m [AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 Jan 14 06:44:10.371: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 �[1mSTEP:�[0m dump namespace information after failure �[38;5;243m01/14/23 06:44:10.476�[0m �[1mSTEP:�[0m Collecting events from namespace "container-probe-9484". �[38;5;243m01/14/23 06:44:10.476�[0m �[1mSTEP:�[0m Found 11 events. 
�[38;5;243m01/14/23 06:44:10.58�[0m Jan 14 06:44:10.581: INFO: At 2023-01-14 06:39:09 +0000 UTC - event for liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1: {default-scheduler } Scheduled: Successfully assigned container-probe-9484/liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1 to i-06bd219a44e00580c Jan 14 06:44:10.581: INFO: At 2023-01-14 06:39:10 +0000 UTC - event for liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f940729ffc1aeee1be96a9722bf717460bfe3d304e3a80acc54bc45b2d762ec2": plugin type="cilium-cni" name="cilium" failed (add): unable to allocate IP via local cilium agent: [POST /ipam][502] postIpamFailure No more IPs available Jan 14 06:44:10.581: INFO: At 2023-01-14 06:39:25 +0000 UTC - event for liveness-325cd592-76e2-40dd-bdf3-bb7e7b8d81f1: {kubelet i-06bd219a44e00580c} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "ac3b0fed695f05dbce935707beae776c75c191fef4257d65b89db8e605427f9a": plugin