PR: Huang-Wei: [1.12] Automated cherry pick of #75144: kubelet: updated logic of verifying a static critical pod
Result: FAILURE
Tests: 1 failed / 34 succeeded
Started: 2019-03-15 08:20
Elapsed: 1h14m
Refs: release-1.12:9166af37, 74995:006b852b
Builder: gke-prow-containerd-pool-99179761-k7t7
pod: 0ef610c8-46fb-11e9-bd0d-0a580a6c132b
infra-commit: 031c214dd
repo: k8s.io/kubernetes
repo-commit: 16201c619712849a4636c38f7dad504b8bd16e0c
repos: {k8s.io/kubernetes: release-1.12:9166af3752c14e27d83fe141489552ecd55eee5c, 74995:006b852b0cf28272343d3bc28a260c9b0015e928}

Test Failures


verify gofmt 17s

make verify WHAT=gofmt
diff -u ./pkg/kubelet/eviction/eviction_manager.go.orig ./pkg/kubelet/eviction/eviction_manager.go
--- ./pkg/kubelet/eviction/eviction_manager.go.orig	2019-03-15 09:11:04.807656919 +0000
+++ ./pkg/kubelet/eviction/eviction_manager.go	2019-03-15 09:11:04.807656919 +0000
@@ -109,15 +109,15 @@
 	clock clock.Clock,
 ) (Manager, lifecycle.PodAdmitHandler) {
 	manager := &managerImpl{
-		clock:                        clock,
-		killPodFunc:                  killPodFunc,
-		mirrorPodFunc:                mirrorPodFunc,
-		imageGC:                      imageGC,
-		containerGC:                  containerGC,
-		config:                       config,
-		recorder:                     recorder,
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           clock,
+		killPodFunc:     killPodFunc,
+		mirrorPodFunc:   mirrorPodFunc,
+		imageGC:         imageGC,
+		containerGC:     containerGC,
+		config:          config,
+		recorder:        recorder,
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 		dedicatedImageFs:             nil,
diff -u ./pkg/kubelet/eviction/eviction_manager_test.go.orig ./pkg/kubelet/eviction/eviction_manager_test.go
--- ./pkg/kubelet/eviction/eviction_manager_test.go.orig	2019-03-15 09:11:04.830656915 +0000
+++ ./pkg/kubelet/eviction/eviction_manager_test.go	2019-03-15 09:11:04.831656915 +0000
@@ -232,14 +232,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -453,14 +453,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("16Gi", "200Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -646,14 +646,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -788,14 +788,14 @@
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("16Gi", "200Gi", podStats)}
 	diskGC := &mockDiskGC{fakeSummaryProvider: summaryProvider, err: nil}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -993,14 +993,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("3Mi", "4Mi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -1202,15 +1202,15 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		mirrorPodFunc:                mirrorPodFunc,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		mirrorPodFunc:   mirrorPodFunc,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -1324,14 +1324,14 @@
 	}
 	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("4Gi", podStats)}
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 	}
@@ -1473,14 +1473,14 @@
 	thresholdNotifier.On("UpdateThreshold", summaryProvider.result).Return(nil).Twice()
 
 	manager := &managerImpl{
-		clock:                        fakeClock,
-		killPodFunc:                  podKiller.killPodNow,
-		imageGC:                      diskGC,
-		containerGC:                  diskGC,
-		config:                       config,
-		recorder:                     &record.FakeRecorder{},
-		summaryProvider:              summaryProvider,
-		nodeRef:                      nodeRef,
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		imageGC:         diskGC,
+		containerGC:     diskGC,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
 		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 		thresholdsFirstObservedAt:    thresholdsObservedAt{},
 		thresholdNotifiers:           []ThresholdNotifier{thresholdNotifier},

Run ./hack/update-gofmt.sh
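The diff above is the whole failure: the code is functionally identical, but the cherry-picked hunks keep the alignment an older gofmt produced, padding every value in the composite literal out to the longest key (nodeConditionsLastObservedAt). Newer gofmt (the go/printer alignment heuristic changed around Go 1.11) breaks such a literal into separate alignment groups when neighboring keys differ greatly in length, which is exactly the regrouping the "+" lines show. A minimal sketch of the effect, using invented map keys rather than the kubelet's fields:

// gofmt_groups.go -- hypothetical illustration, not kubelet code.
package main

import "fmt"

func main() {
	// An older gofmt padded all three values to the longest key:
	//
	//	"short":                    1,
	//	"alsoShort":                2,
	//	"aVeryMuchLongerFieldName": 3,
	//
	// A newer gofmt gives the short keys their own alignment group
	// (the exact split depends on its line-length heuristic):
	m := map[string]int{
		"short":     1,
		"alsoShort": 2,
		"aVeryMuchLongerFieldName": 3,
	}
	fmt.Println(m)
}

The fix is the one the verifier prints: run ./hack/update-gofmt.sh (or gofmt -w on the two files) with the same Go version CI uses, then commit the resulting whitespace-only diff.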
from junit_verify.xml

Error lines from build-log.txt

... skipping 35459 lines ...
W0315 09:10:23.819] wrote "vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD"
W0315 09:10:23.819] wrote "vendor/github.com/coreos/etcd/clientv3/namespace/BUILD"
W0315 09:10:23.819] wrote "vendor/github.com/coreos/etcd/clientv3/naming/BUILD"
W0315 09:10:23.820] wrote "vendor/github.com/coreos/etcd/compactor/BUILD"
W0315 09:10:23.820] wrote "vendor/github.com/coreos/etcd/discovery/BUILD"
W0315 09:10:23.821] wrote "vendor/github.com/coreos/etcd/embed/BUILD"
W0315 09:10:23.821] wrote "vendor/github.com/coreos/etcd/error/BUILD"
W0315 09:10:23.822] wrote "vendor/github.com/coreos/etcd/etcdserver/BUILD"
W0315 09:10:23.822] wrote "vendor/github.com/coreos/etcd/etcdserver/api/BUILD"
W0315 09:10:23.823] wrote "vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD"
W0315 09:10:23.823] wrote "vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD"
W0315 09:10:23.823] wrote "vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD"
W0315 09:10:23.824] wrote "vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD"
... skipping 869 lines ...
W0315 09:11:19.568]  		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
W0315 09:11:19.568]  		thresholdsFirstObservedAt:    thresholdsObservedAt{},
W0315 09:11:19.569]  		thresholdNotifiers:           []ThresholdNotifier{thresholdNotifier},
W0315 09:11:19.569] 
W0315 09:11:19.569] Run ./hack/update-gofmt.sh
I0315 09:11:19.669] +++ exit code: 1
I0315 09:11:19.670] +++ error: 1
I0315 09:11:19.875] FAILED   verify-gofmt.sh	17s
I0315 09:11:19.877] Verifying verify-golint.sh
I0315 09:11:19.907] 
I0315 09:11:19.910] +++ Running case: verify.golint 
I0315 09:11:19.913] +++ working dir: /go/src/k8s.io/kubernetes
I0315 09:11:19.916] +++ command: bash "hack/make-rules/../../hack/verify-golint.sh"
I0315 09:11:19.974] installing golint from vendor
... skipping 229 lines ...
I0315 09:34:27.434] +++ Running case: verify.flags-underscore 
I0315 09:34:27.437] +++ working dir: /go/src/k8s.io/kubernetes
I0315 09:34:27.441] +++ command: python "hack/make-rules/../../hack/verify-flags-underscore.py"
I0315 09:34:29.990] +++ exit code: 0
I0315 09:34:30.291] SUCCESS  verify-flags-underscore.py	3s
I0315 09:34:30.291] ========================
I0315 09:34:30.291] FAILED TESTS
I0315 09:34:30.291] ========================
I0315 09:34:30.291] hack/make-rules/../../hack/verify-gofmt.sh
I0315 09:34:30.294] Makefile:128: recipe for target 'verify' failed
W0315 09:34:30.394] make: *** [verify] Error 1
W0315 09:34:36.624] Traceback (most recent call last):
W0315 09:34:36.624]   File "/workspace/./test-infra/jenkins/../scenarios/kubernetes_verify.py", line 178, in <module>
W0315 09:34:36.624]     ARGS.exclude_typecheck, ARGS.exclude_godep)
W0315 09:34:36.624]   File "/workspace/./test-infra/jenkins/../scenarios/kubernetes_verify.py", line 154, in main
W0315 09:34:36.625]     'bash', '-c', 'cd kubernetes && %s' % script,
W0315 09:34:36.625]   File "/workspace/./test-infra/jenkins/../scenarios/kubernetes_verify.py", line 48, in check
W0315 09:34:36.625]     subprocess.check_call(cmd)
W0315 09:34:36.625]   File "/usr/lib/python2.7/subprocess.py", line 186, in check_call
W0315 09:34:36.655]     raise CalledProcessError(retcode, cmd)
W0315 09:34:36.656] subprocess.CalledProcessError: Command '('docker', 'run', '--rm=true', '--privileged=true', '-v', '/var/run/docker.sock:/var/run/docker.sock', '-v', '/etc/localtime:/etc/localtime:ro', '-v', '/workspace/k8s.io/kubernetes:/go/src/k8s.io/kubernetes', '-v', '/workspace/_artifacts:/workspace/artifacts', '-e', 'KUBE_FORCE_VERIFY_CHECKS=n', '-e', 'KUBE_VERIFY_GIT_BRANCH=release-1.12', '-e', 'EXCLUDE_TYPECHECK=y', '-e', 'EXCLUDE_GODEP=y', '-e', 'REPO_DIR=/workspace/k8s.io/kubernetes', 'gcr.io/k8s-testimages/kubekins-test:1.12-v20190125-cc5d6ecff3', 'bash', '-c', 'cd kubernetes && ./hack/jenkins/verify-dockerized.sh')' returned non-zero exit status 2
E0315 09:34:36.670] Command failed
I0315 09:34:36.671] process 697 exited with code 1 after 67.2m
E0315 09:34:36.671] FAIL: pull-kubernetes-verify
I0315 09:34:36.672] Call:  gcloud auth activate-service-account --key-file=/etc/service-account/service-account.json
W0315 09:34:37.242] Activated service account credentials for: [pr-kubekins@kubernetes-jenkins-pull.iam.gserviceaccount.com]
I0315 09:34:37.285] process 369758 exited with code 0 after 0.0m
I0315 09:34:37.286] Call:  gcloud config get-value account
I0315 09:34:37.591] process 369770 exited with code 0 after 0.0m
I0315 09:34:37.592] Will upload results to gs://kubernetes-jenkins/pr-logs using pr-kubekins@kubernetes-jenkins-pull.iam.gserviceaccount.com
I0315 09:34:37.592] Upload result and artifacts...
I0315 09:34:37.592] Gubernator results at https://gubernator.k8s.io/build/kubernetes-jenkins/pr-logs/pull/74995/pull-kubernetes-verify/126186
I0315 09:34:37.593] Call:  gsutil ls gs://kubernetes-jenkins/pr-logs/pull/74995/pull-kubernetes-verify/126186/artifacts
W0315 09:34:38.946] CommandException: One or more URLs matched no objects.
E0315 09:34:39.107] Command failed
I0315 09:34:39.108] process 369782 exited with code 1 after 0.0m
W0315 09:34:39.108] Remote dir gs://kubernetes-jenkins/pr-logs/pull/74995/pull-kubernetes-verify/126186/artifacts not exist yet
I0315 09:34:39.108] Call:  gsutil -m -q -o GSUtil:use_magicfile=True cp -r -c -z log,txt,xml /workspace/_artifacts gs://kubernetes-jenkins/pr-logs/pull/74995/pull-kubernetes-verify/126186/artifacts
I0315 09:34:41.809] process 369924 exited with code 0 after 0.0m
W0315 09:34:41.809] metadata path /workspace/_artifacts/metadata.json does not exist
W0315 09:34:41.809] metadata not found or invalid, init with empty metadata
... skipping 23 lines ...