PR: Huang-Wei: [1.11] Automated cherry pick of #75144: kubelet: updated logic of verifying a static critical pod
Result: FAILURE
Tests: 1 failed / 33 succeeded
Started: 2019-03-15 17:09
Elapsed: 1h0m
Revision
Builder: gke-prow-containerd-pool-99179761-4nhx
Refs: release-1.11:ede55fd5, 74996:4edc2ad3
pod: f9ee4355-4744-11e9-bd0d-0a580a6c132b
infra-commit: b41ff2592
repo: k8s.io/kubernetes
repo-commit: 9ba66057742aabb7e55b036136d20008fc966453
repos: {u'k8s.io/kubernetes': u'release-1.11:ede55fd572985547208c79eb73c122f3e8f7f79c,74996:4edc2ad364ef6c7cab97f508d0bb49f49ed5c1b7'}

Test Failures


verify gofmt 13s

make verify WHAT=gofmt
diff -u ./pkg/kubelet/eviction/eviction_manager.go.orig ./pkg/kubelet/eviction/eviction_manager.go
--- ./pkg/kubelet/eviction/eviction_manager.go.orig	2019-03-15 17:49:46.848940932 +0000
+++ ./pkg/kubelet/eviction/eviction_manager.go	2019-03-15 17:49:46.848940932 +0000
@@ -109,15 +109,15 @@
 	clock clock.Clock,
 ) (Manager, lifecycle.PodAdmitHandler) {
 	manager := &managerImpl{
-		clock:                        clock,
-		killPodFunc:                  killPodFunc,
-		mirrorPodFunc:                mirrorPodFunc,
-		imageGC:                      imageGC,
-		containerGC:                  containerGC,
-		config:                       config,
-		recorder:                     recorder,
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           clock,
+		killPodFunc:     killPodFunc,
+		mirrorPodFunc:   mirrorPodFunc,
+		imageGC:         imageGC,
+		containerGC:     containerGC,
+		config:          config,
+		recorder:        recorder,
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 		dedicatedImageFs:             nil,
diff -u ./pkg/kubelet/eviction/eviction_manager_test.go.orig ./pkg/kubelet/eviction/eviction_manager_test.go
--- ./pkg/kubelet/eviction/eviction_manager_test.go.orig	2019-03-15 17:49:46.867942554 +0000
+++ ./pkg/kubelet/eviction/eviction_manager_test.go	2019-03-15 17:49:46.867942554 +0000
@@ -232,14 +232,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -453,14 +453,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("16Gi", "200Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -646,14 +646,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -788,14 +788,14 @@
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("16Gi", "200Gi", podStats)}
 	diskGC := &mockDiskGC{fakeSummaryProvider: summaryProvider, err: nil}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -993,14 +993,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("3Mi", "4Mi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -1202,15 +1202,15 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		mirrorPodFunc:                mirrorPodFunc,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		mirrorPodFunc:   mirrorPodFunc,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -1324,14 +1324,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("4Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -1473,14 +1473,14 @@
 	thresholdNotifier.On("UpdateThreshold", summaryProvider.result).Return(nil).Twice()
 
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 		thresholdNotifiers:           []ThresholdNotifier{thresholdNotifier},

Run ./hack/update-gofmt.sh
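The failure is formatting-only: gofmt wants the composite-literal fields above regrouped, most likely because the cherry-picked code was formatted with a different gofmt version than the one in the CI image. A minimal sketch for reproducing and fixing this locally, assuming a Kubernetes checkout at the repository root and the same Go toolchain the job uses (gofmt's standard -l and -w flags list and rewrite misformatted files):

  # List files whose formatting differs from gofmt's output (nothing is written).
  gofmt -l ./pkg/kubelet/eviction/

  # Rewrite the two offending files in place, or run ./hack/update-gofmt.sh to do the
  # same repo-wide, then re-run the failing verify target.
  gofmt -w ./pkg/kubelet/eviction/eviction_manager.go ./pkg/kubelet/eviction/eviction_manager_test.go
  make verify WHAT=gofmt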



Error lines from build-log.txt

... skipping 20635 lines ...
W0315 17:49:18.460] wrote "vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD"
W0315 17:49:18.460] wrote "vendor/github.com/coreos/etcd/clientv3/namespace/BUILD"
W0315 17:49:18.460] wrote "vendor/github.com/coreos/etcd/clientv3/naming/BUILD"
W0315 17:49:18.460] wrote "vendor/github.com/coreos/etcd/compactor/BUILD"
W0315 17:49:18.461] wrote "vendor/github.com/coreos/etcd/discovery/BUILD"
W0315 17:49:18.462] wrote "vendor/github.com/coreos/etcd/embed/BUILD"
W0315 17:49:18.462] wrote "vendor/github.com/coreos/etcd/error/BUILD"
W0315 17:49:18.463] wrote "vendor/github.com/coreos/etcd/etcdserver/BUILD"
W0315 17:49:18.463] wrote "vendor/github.com/coreos/etcd/etcdserver/api/BUILD"
W0315 17:49:18.463] wrote "vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD"
W0315 17:49:18.464] wrote "vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD"
W0315 17:49:18.474] wrote "vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD"
W0315 17:49:18.474] wrote "vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD"
... skipping 817 lines ...
W0315 17:49:58.674]  		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
W0315 17:49:58.674]  		thresholdsFirstObservedAt:    thresholdsObservedAt{},
W0315 17:49:58.674]  		thresholdNotifiers:           []ThresholdNotifier{thresholdNotifier},
W0315 17:49:58.674] 
W0315 17:49:58.674] Run ./hack/update-gofmt.sh
I0315 17:49:58.775] +++ exit code: 1
I0315 17:49:58.775] +++ error: 1
I0315 17:49:58.873] FAILED   verify-gofmt.sh	13s
I0315 17:49:58.874] Verifying verify-golint.sh
I0315 17:49:58.893] 
I0315 17:49:58.895] +++ Running case: verify.golint 
I0315 17:49:58.897] +++ working dir: /go/src/k8s.io/kubernetes
I0315 17:49:58.900] +++ command: bash "hack/make-rules/../../hack/verify-golint.sh"
I0315 17:51:07.448] Congratulations!  All Go source files have been linted.
... skipping 198 lines ...
I0315 18:09:39.119] +++ Running case: verify.flags-underscore 
I0315 18:09:39.121] +++ working dir: /go/src/k8s.io/kubernetes
I0315 18:09:39.123] +++ command: python "hack/make-rules/../../hack/verify-flags-underscore.py"
I0315 18:09:41.303] +++ exit code: 0
I0315 18:09:41.457] SUCCESS  verify-flags-underscore.py	2s
I0315 18:09:41.458] ========================
I0315 18:09:41.458] FAILED TESTS
I0315 18:09:41.458] ========================
I0315 18:09:41.458] hack/make-rules/../../hack/verify-gofmt.sh
I0315 18:09:41.459] Makefile:128: recipe for target 'verify' failed
W0315 18:09:41.559] make: *** [verify] Error 1
W0315 18:09:47.002] Traceback (most recent call last):
W0315 18:09:47.002]   File "/workspace/./test-infra/jenkins/../scenarios/kubernetes_verify.py", line 178, in <module>
W0315 18:09:47.002]     ARGS.exclude_typecheck, ARGS.exclude_godep)
W0315 18:09:47.002]   File "/workspace/./test-infra/jenkins/../scenarios/kubernetes_verify.py", line 154, in main
W0315 18:09:47.002]     'bash', '-c', 'cd kubernetes && %s' % script,
W0315 18:09:47.002]   File "/workspace/./test-infra/jenkins/../scenarios/kubernetes_verify.py", line 48, in check
W0315 18:09:47.003]     subprocess.check_call(cmd)
W0315 18:09:47.003]   File "/usr/lib/python2.7/subprocess.py", line 186, in check_call
W0315 18:09:47.041]     raise CalledProcessError(retcode, cmd)
W0315 18:09:47.042] subprocess.CalledProcessError: Command '('docker', 'run', '--rm=true', '--privileged=true', '-v', '/var/run/docker.sock:/var/run/docker.sock', '-v', '/etc/localtime:/etc/localtime:ro', '-v', '/workspace/k8s.io/kubernetes:/go/src/k8s.io/kubernetes', '-v', '/workspace/_artifacts:/workspace/artifacts', '-e', 'KUBE_FORCE_VERIFY_CHECKS=n', '-e', 'KUBE_VERIFY_GIT_BRANCH=release-1.11', '-e', 'EXCLUDE_TYPECHECK=y', '-e', 'EXCLUDE_GODEP=y', '-e', 'REPO_DIR=/workspace/k8s.io/kubernetes', 'gcr.io/k8s-testimages/kubekins-test:1.11-v20190125-cc5d6ecff3', 'bash', '-c', 'cd kubernetes && ./hack/jenkins/verify-dockerized.sh')' returned non-zero exit status 2
E0315 18:09:47.050] Command failed
I0315 18:09:47.050] process 697 exited with code 1 after 51.9m
E0315 18:09:47.050] FAIL: pull-kubernetes-verify
I0315 18:09:47.051] Call:  gcloud auth activate-service-account --key-file=/etc/service-account/service-account.json
W0315 18:09:47.690] Activated service account credentials for: [pr-kubekins@kubernetes-jenkins-pull.iam.gserviceaccount.com]
I0315 18:09:47.732] process 350429 exited with code 0 after 0.0m
I0315 18:09:47.732] Call:  gcloud config get-value account
I0315 18:09:47.971] process 350441 exited with code 0 after 0.0m
I0315 18:09:47.971] Will upload results to gs://kubernetes-jenkins/pr-logs using pr-kubekins@kubernetes-jenkins-pull.iam.gserviceaccount.com
I0315 18:09:47.971] Upload result and artifacts...
I0315 18:09:47.972] Gubernator results at https://gubernator.k8s.io/build/kubernetes-jenkins/pr-logs/pull/74996/pull-kubernetes-verify/126206
I0315 18:09:47.972] Call:  gsutil ls gs://kubernetes-jenkins/pr-logs/pull/74996/pull-kubernetes-verify/126206/artifacts
W0315 18:09:48.984] CommandException: One or more URLs matched no objects.
E0315 18:09:49.100] Command failed
I0315 18:09:49.100] process 350453 exited with code 1 after 0.0m
W0315 18:09:49.100] Remote dir gs://kubernetes-jenkins/pr-logs/pull/74996/pull-kubernetes-verify/126206/artifacts not exist yet
I0315 18:09:49.101] Call:  gsutil -m -q -o GSUtil:use_magicfile=True cp -r -c -z log,txt,xml /workspace/_artifacts gs://kubernetes-jenkins/pr-logs/pull/74996/pull-kubernetes-verify/126206/artifacts
I0315 18:09:51.557] process 350595 exited with code 0 after 0.0m
W0315 18:09:51.557] metadata path /workspace/_artifacts/metadata.json does not exist
W0315 18:09:51.557] metadata not found or invalid, init with empty metadata
... skipping 23 lines ...