This job view page is being replaced by Spyglass soon. Check out the new job view.
PR: Huang-Wei: [1.11] Automated cherry pick of #75144: kubelet: updated logic of verifying a static critical pod
Result: FAILURE
Tests: 1 failed / 33 succeeded
Started: 2019-03-15 08:32
Elapsed: 1h12m
Revision
Builder: gke-prow-containerd-pool-99179761-8mx1
Refs: release-1.11:ede55fd5
74996:4edc2ad3
pod: 8dd8276a-46fc-11e9-be52-0a580a6c0982
infra-commit: 031c214dd
pod: 8dd8276a-46fc-11e9-be52-0a580a6c0982
repo: k8s.io/kubernetes
repo-commit: 9ba66057742aabb7e55b036136d20008fc966453
repos: {u'k8s.io/kubernetes': u'release-1.11:ede55fd572985547208c79eb73c122f3e8f7f79c,74996:4edc2ad364ef6c7cab97f508d0bb49f49ed5c1b7'}

Test Failures


verify gofmt 16s

make verify WHAT=gofmt
diff -u ./pkg/kubelet/eviction/eviction_manager.go.orig ./pkg/kubelet/eviction/eviction_manager.go
--- ./pkg/kubelet/eviction/eviction_manager.go.orig	2019-03-15 09:19:57.810443075 +0000
+++ ./pkg/kubelet/eviction/eviction_manager.go	2019-03-15 09:19:57.810443075 +0000
@@ -109,15 +109,15 @@
 	clock clock.Clock,
 ) (Manager, lifecycle.PodAdmitHandler) {
 	manager := &managerImpl{
-		clock:                        clock,
-		killPodFunc:                  killPodFunc,
-		mirrorPodFunc:                mirrorPodFunc,
-		imageGC:                      imageGC,
-		containerGC:                  containerGC,
-		config:                       config,
-		recorder:                     recorder,
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           clock,
+		killPodFunc:     killPodFunc,
+		mirrorPodFunc:   mirrorPodFunc,
+		imageGC:         imageGC,
+		containerGC:     containerGC,
+		config:          config,
+		recorder:        recorder,
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 		dedicatedImageFs:             nil,
diff -u ./pkg/kubelet/eviction/eviction_manager_test.go.orig ./pkg/kubelet/eviction/eviction_manager_test.go
--- ./pkg/kubelet/eviction/eviction_manager_test.go.orig	2019-03-15 09:19:57.836445147 +0000
+++ ./pkg/kubelet/eviction/eviction_manager_test.go	2019-03-15 09:19:57.838445307 +0000
@@ -232,14 +232,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -453,14 +453,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("16Gi", "200Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -646,14 +646,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -788,14 +788,14 @@
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("16Gi", "200Gi", podStats)}
 	diskGC := &mockDiskGC{fakeSummaryProvider: summaryProvider, err: nil}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -993,14 +993,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("3Mi", "4Mi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -1202,15 +1202,15 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		mirrorPodFunc:                mirrorPodFunc,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		mirrorPodFunc:   mirrorPodFunc,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -1324,14 +1324,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("4Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -1473,14 +1473,14 @@
 	thresholdNotifier.On("UpdateThreshold", summaryProvider.result).Return(nil).Twice()
 
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 		thresholdNotifiers:           []ThresholdNotifier{thresholdNotifier},

Run ./hack/update-gofmt.sh
				
				Click to see stdout/stderr from junit_verify.xml

Filter through log files | View test history on testgrid


Show 33 Passed Tests

Error lines from build-log.txt

... skipping 20633 lines ...
W0315 09:18:54.599] wrote "vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD"
W0315 09:18:54.600] wrote "vendor/github.com/coreos/etcd/clientv3/namespace/BUILD"
W0315 09:18:54.600] wrote "vendor/github.com/coreos/etcd/clientv3/naming/BUILD"
W0315 09:18:54.600] wrote "vendor/github.com/coreos/etcd/compactor/BUILD"
W0315 09:18:54.601] wrote "vendor/github.com/coreos/etcd/discovery/BUILD"
W0315 09:18:54.602] wrote "vendor/github.com/coreos/etcd/embed/BUILD"
W0315 09:18:54.602] wrote "vendor/github.com/coreos/etcd/error/BUILD"
W0315 09:18:54.603] wrote "vendor/github.com/coreos/etcd/etcdserver/BUILD"
W0315 09:18:54.603] wrote "vendor/github.com/coreos/etcd/etcdserver/api/BUILD"
W0315 09:18:54.604] wrote "vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD"
W0315 09:18:54.604] wrote "vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD"
W0315 09:18:54.606] wrote "vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD"
W0315 09:18:54.606] wrote "vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD"
... skipping 817 lines ...
W0315 09:20:12.334]  		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
W0315 09:20:12.334]  		thresholdsFirstObservedAt:    thresholdsObservedAt{},
W0315 09:20:12.334]  		thresholdNotifiers:           []ThresholdNotifier{thresholdNotifier},
W0315 09:20:12.334] 
W0315 09:20:12.334] Run ./hack/update-gofmt.sh
I0315 09:20:12.435] +++ exit code: 1
I0315 09:20:12.435] +++ error: 1
I0315 09:20:12.560] FAILED   verify-gofmt.sh	17s
I0315 09:20:12.564] Verifying verify-golint.sh
I0315 09:20:12.603] 
I0315 09:20:12.606] +++ Running case: verify.golint 
I0315 09:20:12.610] +++ working dir: /go/src/k8s.io/kubernetes
I0315 09:20:12.616] +++ command: bash "hack/make-rules/../../hack/verify-golint.sh"
I0315 09:21:55.750] Congratulations!  All Go source files have been linted.
... skipping 198 lines ...
I0315 09:44:04.207] +++ Running case: verify.flags-underscore 
I0315 09:44:04.211] +++ working dir: /go/src/k8s.io/kubernetes
I0315 09:44:04.215] +++ command: python "hack/make-rules/../../hack/verify-flags-underscore.py"
I0315 09:44:06.606] +++ exit code: 0
I0315 09:44:06.925] SUCCESS  verify-flags-underscore.py	2s
I0315 09:44:06.926] ========================
I0315 09:44:06.926] FAILED TESTS
I0315 09:44:06.926] ========================
I0315 09:44:06.926] hack/make-rules/../../hack/verify-gofmt.sh
I0315 09:44:06.928] Makefile:128: recipe for target 'verify' failed
W0315 09:44:07.029] make: *** [verify] Error 1
W0315 09:44:09.850] Traceback (most recent call last):
W0315 09:44:09.851]   File "/workspace/./test-infra/jenkins/../scenarios/kubernetes_verify.py", line 178, in <module>
W0315 09:44:09.851]     ARGS.exclude_typecheck, ARGS.exclude_godep)
W0315 09:44:09.851]   File "/workspace/./test-infra/jenkins/../scenarios/kubernetes_verify.py", line 154, in main
W0315 09:44:09.851]     'bash', '-c', 'cd kubernetes && %s' % script,
W0315 09:44:09.851]   File "/workspace/./test-infra/jenkins/../scenarios/kubernetes_verify.py", line 48, in check
W0315 09:44:09.852]     subprocess.check_call(cmd)
W0315 09:44:09.852]   File "/usr/lib/python2.7/subprocess.py", line 186, in check_call
W0315 09:44:09.874]     raise CalledProcessError(retcode, cmd)
W0315 09:44:09.875] subprocess.CalledProcessError: Command '('docker', 'run', '--rm=true', '--privileged=true', '-v', '/var/run/docker.sock:/var/run/docker.sock', '-v', '/etc/localtime:/etc/localtime:ro', '-v', '/workspace/k8s.io/kubernetes:/go/src/k8s.io/kubernetes', '-v', '/workspace/_artifacts:/workspace/artifacts', '-e', 'KUBE_FORCE_VERIFY_CHECKS=n', '-e', 'KUBE_VERIFY_GIT_BRANCH=release-1.11', '-e', 'EXCLUDE_TYPECHECK=y', '-e', 'EXCLUDE_GODEP=y', '-e', 'REPO_DIR=/workspace/k8s.io/kubernetes', 'gcr.io/k8s-testimages/kubekins-test:1.11-v20190125-cc5d6ecff3', 'bash', '-c', 'cd kubernetes && ./hack/jenkins/verify-dockerized.sh')' returned non-zero exit status 2
E0315 09:44:09.889] Command failed
I0315 09:44:09.889] process 686 exited with code 1 after 65.8m
E0315 09:44:09.890] FAIL: pull-kubernetes-verify
I0315 09:44:09.891] Call:  gcloud auth activate-service-account --key-file=/etc/service-account/service-account.json
W0315 09:44:10.509] Activated service account credentials for: [pr-kubekins@kubernetes-jenkins-pull.iam.gserviceaccount.com]
I0315 09:44:10.585] process 350189 exited with code 0 after 0.0m
I0315 09:44:10.585] Call:  gcloud config get-value account
I0315 09:44:10.994] process 350201 exited with code 0 after 0.0m
I0315 09:44:10.995] Will upload results to gs://kubernetes-jenkins/pr-logs using pr-kubekins@kubernetes-jenkins-pull.iam.gserviceaccount.com
I0315 09:44:10.995] Upload result and artifacts...
I0315 09:44:10.995] Gubernator results at https://gubernator.k8s.io/build/kubernetes-jenkins/pr-logs/pull/74996/pull-kubernetes-verify/126188
I0315 09:44:10.996] Call:  gsutil ls gs://kubernetes-jenkins/pr-logs/pull/74996/pull-kubernetes-verify/126188/artifacts
W0315 09:44:12.609] CommandException: One or more URLs matched no objects.
E0315 09:44:12.807] Command failed
I0315 09:44:12.807] process 350213 exited with code 1 after 0.0m
W0315 09:44:12.807] Remote dir gs://kubernetes-jenkins/pr-logs/pull/74996/pull-kubernetes-verify/126188/artifacts not exist yet
I0315 09:44:12.808] Call:  gsutil -m -q -o GSUtil:use_magicfile=True cp -r -c -z log,txt,xml /workspace/_artifacts gs://kubernetes-jenkins/pr-logs/pull/74996/pull-kubernetes-verify/126188/artifacts
I0315 09:44:15.614] process 350355 exited with code 0 after 0.0m
W0315 09:44:15.614] metadata path /workspace/_artifacts/metadata.json does not exist
W0315 09:44:15.615] metadata not found or invalid, init with empty metadata
... skipping 23 lines ...