This job view page is being replaced by Spyglass soon. Check out the new job view.
PR JulienBalestra: controller/discoverer: add readiness check
Result FAILURE
Tests 0 failed / 0 succeeded
Started 2019-08-13 16:07
Elapsed 3m59s
Revision bbbf190c0d1f656c765cb4c519d2511786dd4ef0
Refs 135

No Test Failures!


Error lines from build-log.txt

... skipping 840 lines ...
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/k8s.io/kubernetes/pkg/kubelet/metrics
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/k8s.io/client-go/util/jsonpath
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/sigs.k8s.io/kustomize/pkg/gvk
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/sigs.k8s.io/kustomize/pkg/image
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/sigs.k8s.io/kustomize/pkg/internal/error
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/sigs.k8s.io/kustomize/pkg/expansion
encoding/gob
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/github.com/mailru/easyjson/jlexer
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/github.com/mailru/easyjson/buffer
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/sigs.k8s.io/kustomize/pkg/patch
sigs.k8s.io/sig-storage-local-static-provisioner/vendor/k8s.io/apiserver/pkg/audit
... skipping 1205 lines ...
77a77
> type pusherDelegator struct{ *responseWriterDelegator }
79a80,81
> 	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
> 	//remove support from client_golang yet.
95a98,100
> func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
> 	return d.ResponseWriter.(http.Pusher).Push(target, opts)
> }
198a204,356
> 	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
> 		return pusherDelegator{d}
> 	}
... skipping 188 lines ...
< 	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
< 	// errors completely.
---
> 	// effort" metrics collection scenarios. In this case, it is highly
> 	// recommended to provide other means of detecting errors: By setting an
> 	// ErrorLog in HandlerOpts, the errors are logged. By providing a
> 	// Registry in HandlerOpts, the exposed metrics include an error counter
> 	// "promhttp_metric_handler_errors_total", which can be used for
> 	// alerts.
264a291,302
> 	// If Registry is not nil, it is used to register a metric
> 	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
> 	// failed registration causes a panic. Note that this error counter is
> 	// different from the instrumentation you get from the various
> 	// InstrumentHandler... helpers. It counts errors that don't necessarily
> 	// result in a non-2xx HTTP status code. There are two typical cases:
> 	// (1) Encoding errors that only happen after streaming of the HTTP body
> 	// has already started (and the status code 200 has been sent). This
> 	// should only happen with custom collectors. (2) Collection errors with
... skipping 36 lines ...
> // InstrumentTrace struct are ignored. Times reported to the hook functions are
> // time since the start of the request. Only with Go1.9+, those times are
> // guaranteed to never be negative. (Earlier Go versions are not using a
> // monotonic clock.) Note that partitioning of Histograms is expensive and
> // should be used judiciously.
> //
> // For hook functions that receive an error as an argument, no observations are
> // made in the event of a non-nil error value.
> //
> // See the example for ExampleInstrumentRoundTripperDuration for example usage.
> func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
> 	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
> 		start := time.Now()
> 
> 		trace := &httptrace.ClientTrace{
> 			GotConn: func(_ httptrace.GotConnInfo) {
> 				if it.GotConn != nil {
> 					it.GotConn(time.Since(start).Seconds())
> 				}
> 			},
> 			PutIdleConn: func(err error) {
> 				if err != nil {
> 					return
> 				}
> 				if it.PutIdleConn != nil {
> 					it.PutIdleConn(time.Since(start).Seconds())
> 				}
... skipping 10 lines ...
> 			},
> 			ConnectStart: func(_, _ string) {
> 				if it.ConnectStart != nil {
> 					it.ConnectStart(time.Since(start).Seconds())
> 				}
> 			},
> 			ConnectDone: func(_, _ string, err error) {
> 				if err != nil {
> 					return
> 				}
> 				if it.ConnectDone != nil {
> 					it.ConnectDone(time.Since(start).Seconds())
> 				}
... skipping 10 lines ...
> 			},
> 			TLSHandshakeStart: func() {
> 				if it.TLSHandshakeStart != nil {
> 					it.TLSHandshakeStart(time.Since(start).Seconds())
> 				}
> 			},
> 			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
> 				if err != nil {
> 					return
> 				}
> 				if it.TLSHandshakeDone != nil {
> 					it.TLSHandshakeDone(time.Since(start).Seconds())
> 				}
... skipping 165 lines ...
> 	}
> 	// Increment count last as we take it as a signal that the observation
> 	// is complete.
> 	atomic.AddUint64(&hotCounts.count, 1)
> }
> 
> func (s *noObjectivesSummary) Write(out *dto.Metric) error {
> 	// For simplicity, we protect this whole method by a mutex. It is not in
> 	// the hot path, i.e. Observe is called much more often than Write. The
> 	// complication of making Write lock-free isn't worth it, if possible at
> 	// all.
> 	s.writeMtx.Lock()
> 	defer s.writeMtx.Unlock()
... skipping 64 lines ...
> func (c *wrappingCollector) unwrapRecursively() Collector {
> 	switch wc := c.wrappedCollector.(type) {
> 	case *wrappingCollector:
> 		return wc.unwrapRecursively()
> 	default:
> 		return wc
Makefile:50: recipe for target 'verify' failed
make: *** [verify] Error 1
+ EXIT_VALUE=2
+ set +o xtrace