PR #80320 (wk8): Make container removal fail if platform-specific containers fail
Result: FAILURE
Tests: 1 failed / 2810 succeeded
Started: 2019-08-23 01:04
Elapsed: 29m4s
Revision:
Builder: gke-prow-ssd-pool-1a225945-8ghq
Refs: master:c369cf18, 80320:4d4edcb2
pod: e73a72a2-c541-11e9-9342-e69cf4ca5bc2
infra-commit: c62e95a9f
repo: k8s.io/kubernetes
repo-commit: cd74bd1bf27c93a2483fc84fbf2e07c15de75bf1
repos: {u'k8s.io/kubernetes': u'master:c369cf187ea765c0a2387f2b39abe6ed18c8e6a8,80320:4d4edcb27b63f764ad91cc6da0ad23638824f158'}

Test Failures


k8s.io/kubernetes/test/integration/scheduler TestNodePIDPressure 34s

go test -v k8s.io/kubernetes/test/integration/scheduler -run TestNodePIDPressure$
=== RUN   TestNodePIDPressure
I0823 01:29:25.167113  108223 services.go:33] Network range for service cluster IPs is unspecified. Defaulting to {10.0.0.0 ffffff00}.
I0823 01:29:25.167140  108223 services.go:45] Setting service IP to "10.0.0.1" (read-write).
I0823 01:29:25.167152  108223 master.go:278] Node port range unspecified. Defaulting to 30000-32767.
I0823 01:29:25.167170  108223 master.go:234] Using reconciler: 
I0823 01:29:25.169449  108223 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.170112  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.170239  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.172601  108223 store.go:1342] Monitoring podtemplates count at <storage-prefix>//podtemplates
I0823 01:29:25.172825  108223 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.174647  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.172739  108223 reflector.go:158] Listing and watching *core.PodTemplate from storage/cacher.go:/podtemplates
I0823 01:29:25.174841  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.175821  108223 watch_cache.go:405] Replace watchCache (rev: 29821) 
I0823 01:29:25.181711  108223 store.go:1342] Monitoring events count at <storage-prefix>//events
I0823 01:29:25.181779  108223 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.182022  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.182072  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.182214  108223 reflector.go:158] Listing and watching *core.Event from storage/cacher.go:/events
I0823 01:29:25.182973  108223 store.go:1342] Monitoring limitranges count at <storage-prefix>//limitranges
I0823 01:29:25.183019  108223 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.183241  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.183273  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.183456  108223 reflector.go:158] Listing and watching *core.LimitRange from storage/cacher.go:/limitranges
I0823 01:29:25.183790  108223 store.go:1342] Monitoring resourcequotas count at <storage-prefix>//resourcequotas
I0823 01:29:25.183965  108223 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.184196  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.184219  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.184392  108223 reflector.go:158] Listing and watching *core.ResourceQuota from storage/cacher.go:/resourcequotas
I0823 01:29:25.184777  108223 store.go:1342] Monitoring secrets count at <storage-prefix>//secrets
I0823 01:29:25.184929  108223 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.185161  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.185188  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.185519  108223 reflector.go:158] Listing and watching *core.Secret from storage/cacher.go:/secrets
I0823 01:29:25.186436  108223 watch_cache.go:405] Replace watchCache (rev: 29821) 
I0823 01:29:25.187127  108223 reflector.go:158] Listing and watching *core.PersistentVolume from storage/cacher.go:/persistentvolumes
I0823 01:29:25.186091  108223 store.go:1342] Monitoring persistentvolumes count at <storage-prefix>//persistentvolumes
I0823 01:29:25.188089  108223 watch_cache.go:405] Replace watchCache (rev: 29821) 
I0823 01:29:25.188531  108223 watch_cache.go:405] Replace watchCache (rev: 29821) 
I0823 01:29:25.188791  108223 watch_cache.go:405] Replace watchCache (rev: 29821) 
I0823 01:29:25.189362  108223 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.190141  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.192413  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.192728  108223 store.go:1342] Monitoring persistentvolumeclaims count at <storage-prefix>//persistentvolumeclaims
I0823 01:29:25.192989  108223 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.193408  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.192099  108223 watch_cache.go:405] Replace watchCache (rev: 29821) 
I0823 01:29:25.193808  108223 reflector.go:158] Listing and watching *core.PersistentVolumeClaim from storage/cacher.go:/persistentvolumeclaims
I0823 01:29:25.194671  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.195414  108223 store.go:1342] Monitoring configmaps count at <storage-prefix>//configmaps
I0823 01:29:25.195699  108223 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.195888  108223 reflector.go:158] Listing and watching *core.ConfigMap from storage/cacher.go:/configmaps
I0823 01:29:25.196659  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.197009  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.197278  108223 store.go:1342] Monitoring namespaces count at <storage-prefix>//namespaces
I0823 01:29:25.198111  108223 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.198571  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.198721  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.198908  108223 watch_cache.go:405] Replace watchCache (rev: 29821) 
I0823 01:29:25.197432  108223 reflector.go:158] Listing and watching *core.Namespace from storage/cacher.go:/namespaces
I0823 01:29:25.199523  108223 store.go:1342] Monitoring endpoints count at <storage-prefix>//services/endpoints
I0823 01:29:25.199827  108223 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.202852  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.200058  108223 reflector.go:158] Listing and watching *core.Endpoints from storage/cacher.go:/services/endpoints
I0823 01:29:25.202352  108223 watch_cache.go:405] Replace watchCache (rev: 29821) 
I0823 01:29:25.205247  108223 watch_cache.go:405] Replace watchCache (rev: 29822) 
I0823 01:29:25.205568  108223 watch_cache.go:405] Replace watchCache (rev: 29822) 
I0823 01:29:25.206077  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.208270  108223 store.go:1342] Monitoring nodes count at <storage-prefix>//minions
I0823 01:29:25.208351  108223 reflector.go:158] Listing and watching *core.Node from storage/cacher.go:/minions
I0823 01:29:25.209267  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.209888  108223 watch_cache.go:405] Replace watchCache (rev: 29822) 
I0823 01:29:25.210329  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.210516  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.211287  108223 store.go:1342] Monitoring pods count at <storage-prefix>//pods
I0823 01:29:25.211440  108223 reflector.go:158] Listing and watching *core.Pod from storage/cacher.go:/pods
I0823 01:29:25.213137  108223 watch_cache.go:405] Replace watchCache (rev: 29822) 
I0823 01:29:25.214563  108223 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.214990  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.215149  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.215588  108223 store.go:1342] Monitoring serviceaccounts count at <storage-prefix>//serviceaccounts
I0823 01:29:25.215759  108223 reflector.go:158] Listing and watching *core.ServiceAccount from storage/cacher.go:/serviceaccounts
I0823 01:29:25.216388  108223 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.217060  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.217209  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.217397  108223 watch_cache.go:405] Replace watchCache (rev: 29822) 
I0823 01:29:25.223708  108223 store.go:1342] Monitoring services count at <storage-prefix>//services/specs
I0823 01:29:25.223830  108223 reflector.go:158] Listing and watching *core.Service from storage/cacher.go:/services/specs
I0823 01:29:25.223929  108223 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.225325  108223 watch_cache.go:405] Replace watchCache (rev: 29822) 
I0823 01:29:25.226464  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.226689  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.227714  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.227990  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.228430  108223 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.229189  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.229312  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.231612  108223 store.go:1342] Monitoring replicationcontrollers count at <storage-prefix>//controllers
I0823 01:29:25.231698  108223 reflector.go:158] Listing and watching *core.ReplicationController from storage/cacher.go:/controllers
I0823 01:29:25.232543  108223 storage_factory.go:285] storing bindings in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.232789  108223 storage_factory.go:285] storing componentstatuses in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.233089  108223 watch_cache.go:405] Replace watchCache (rev: 29822) 
I0823 01:29:25.234554  108223 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.235601  108223 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.236393  108223 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.237183  108223 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.237625  108223 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.237732  108223 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.237993  108223 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.239134  108223 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.239852  108223 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.240062  108223 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.240813  108223 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.241167  108223 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.241786  108223 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.242386  108223 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.243123  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.243364  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.243571  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.243699  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.243871  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.244079  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.244663  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.245703  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.246242  108223 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.247234  108223 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.248094  108223 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.248473  108223 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.248836  108223 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.249680  108223 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.250086  108223 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.251096  108223 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.252186  108223 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.253139  108223 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.254194  108223 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.254877  108223 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.255211  108223 master.go:423] Skipping disabled API group "auditregistration.k8s.io".
I0823 01:29:25.255331  108223 master.go:434] Enabling API group "authentication.k8s.io".
I0823 01:29:25.255435  108223 master.go:434] Enabling API group "authorization.k8s.io".
I0823 01:29:25.255680  108223 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.256328  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.256528  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.263700  108223 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0823 01:29:25.263776  108223 reflector.go:158] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0823 01:29:25.264558  108223 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.264930  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.264963  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.265436  108223 watch_cache.go:405] Replace watchCache (rev: 29823) 
I0823 01:29:25.265672  108223 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0823 01:29:25.266021  108223 reflector.go:158] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0823 01:29:25.266359  108223 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.266667  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.266695  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.267737  108223 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0823 01:29:25.267762  108223 master.go:434] Enabling API group "autoscaling".
I0823 01:29:25.267809  108223 reflector.go:158] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0823 01:29:25.268388  108223 watch_cache.go:405] Replace watchCache (rev: 29823) 
I0823 01:29:25.268725  108223 watch_cache.go:405] Replace watchCache (rev: 29823) 
I0823 01:29:25.281611  108223 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.281887  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.281921  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.282861  108223 store.go:1342] Monitoring jobs.batch count at <storage-prefix>//jobs
I0823 01:29:25.283031  108223 reflector.go:158] Listing and watching *batch.Job from storage/cacher.go:/jobs
I0823 01:29:25.284648  108223 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.284907  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.284939  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.285142  108223 store.go:1342] Monitoring cronjobs.batch count at <storage-prefix>//cronjobs
I0823 01:29:25.285178  108223 master.go:434] Enabling API group "batch".
I0823 01:29:25.285337  108223 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.285465  108223 watch_cache.go:405] Replace watchCache (rev: 29824) 
I0823 01:29:25.285570  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.285596  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.285713  108223 store.go:1342] Monitoring certificatesigningrequests.certificates.k8s.io count at <storage-prefix>//certificatesigningrequests
I0823 01:29:25.285732  108223 master.go:434] Enabling API group "certificates.k8s.io".
I0823 01:29:25.285733  108223 reflector.go:158] Listing and watching *batch.CronJob from storage/cacher.go:/cronjobs
I0823 01:29:25.285883  108223 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.285970  108223 reflector.go:158] Listing and watching *certificates.CertificateSigningRequest from storage/cacher.go:/certificatesigningrequests
I0823 01:29:25.286121  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.286790  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.286942  108223 store.go:1342] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0823 01:29:25.287145  108223 reflector.go:158] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0823 01:29:25.287094  108223 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.287514  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.287538  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.287609  108223 store.go:1342] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0823 01:29:25.287857  108223 reflector.go:158] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0823 01:29:25.290410  108223 master.go:434] Enabling API group "coordination.k8s.io".
I0823 01:29:25.290593  108223 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.290852  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.290875  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.291266  108223 store.go:1342] Monitoring ingresses.networking.k8s.io count at <storage-prefix>//ingress
I0823 01:29:25.291323  108223 master.go:434] Enabling API group "extensions".
I0823 01:29:25.291380  108223 reflector.go:158] Listing and watching *networking.Ingress from storage/cacher.go:/ingress
I0823 01:29:25.291631  108223 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.291798  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.291818  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.292319  108223 watch_cache.go:405] Replace watchCache (rev: 29824) 
I0823 01:29:25.292514  108223 watch_cache.go:405] Replace watchCache (rev: 29824) 
I0823 01:29:25.293679  108223 store.go:1342] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0823 01:29:25.293823  108223 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.293922  108223 reflector.go:158] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0823 01:29:25.295272  108223 watch_cache.go:405] Replace watchCache (rev: 29824) 
I0823 01:29:25.297210  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.297237  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.297372  108223 store.go:1342] Monitoring ingresses.networking.k8s.io count at <storage-prefix>//ingress
I0823 01:29:25.297390  108223 master.go:434] Enabling API group "networking.k8s.io".
I0823 01:29:25.297423  108223 storage_factory.go:285] storing runtimeclasses.node.k8s.io in node.k8s.io/v1beta1, reading as node.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.297595  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.297616  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.297724  108223 store.go:1342] Monitoring runtimeclasses.node.k8s.io count at <storage-prefix>//runtimeclasses
I0823 01:29:25.297737  108223 master.go:434] Enabling API group "node.k8s.io".
I0823 01:29:25.297944  108223 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.298126  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.298146  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.298273  108223 store.go:1342] Monitoring poddisruptionbudgets.policy count at <storage-prefix>//poddisruptionbudgets
I0823 01:29:25.308021  108223 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.298500  108223 reflector.go:158] Listing and watching *networking.Ingress from storage/cacher.go:/ingress
I0823 01:29:25.298644  108223 watch_cache.go:405] Replace watchCache (rev: 29824) 
I0823 01:29:25.298803  108223 watch_cache.go:405] Replace watchCache (rev: 29824) 
I0823 01:29:25.299123  108223 reflector.go:158] Listing and watching *node.RuntimeClass from storage/cacher.go:/runtimeclasses
I0823 01:29:25.299171  108223 reflector.go:158] Listing and watching *policy.PodDisruptionBudget from storage/cacher.go:/poddisruptionbudgets
I0823 01:29:25.305393  108223 watch_cache.go:405] Replace watchCache (rev: 29825) 
I0823 01:29:25.312383  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.312529  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.314241  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.312923  108223 watch_cache.go:405] Replace watchCache (rev: 29827) 
I0823 01:29:25.312943  108223 watch_cache.go:405] Replace watchCache (rev: 29827) 
I0823 01:29:25.316863  108223 store.go:1342] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicy
I0823 01:29:25.317206  108223 master.go:434] Enabling API group "policy".
I0823 01:29:25.317111  108223 reflector.go:158] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicy
I0823 01:29:25.319532  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.320547  108223 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.320936  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.321090  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.324083  108223 store.go:1342] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0823 01:29:25.324255  108223 reflector.go:158] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0823 01:29:25.327259  108223 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.327574  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.327848  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.327418  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.328884  108223 store.go:1342] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0823 01:29:25.329113  108223 reflector.go:158] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0823 01:29:25.329198  108223 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.329835  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.329954  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.330524  108223 store.go:1342] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0823 01:29:25.330660  108223 reflector.go:158] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0823 01:29:25.331312  108223 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.331639  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.331745  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.332311  108223 store.go:1342] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0823 01:29:25.332415  108223 reflector.go:158] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0823 01:29:25.332532  108223 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.333165  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.333289  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.333862  108223 store.go:1342] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0823 01:29:25.333994  108223 reflector.go:158] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0823 01:29:25.334635  108223 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.334840  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.334938  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.335398  108223 store.go:1342] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0823 01:29:25.335581  108223 reflector.go:158] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0823 01:29:25.335886  108223 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.336121  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.336229  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.336757  108223 store.go:1342] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0823 01:29:25.336817  108223 reflector.go:158] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0823 01:29:25.337339  108223 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.337533  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.337627  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.338099  108223 store.go:1342] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0823 01:29:25.338332  108223 master.go:434] Enabling API group "rbac.authorization.k8s.io".
I0823 01:29:25.338273  108223 reflector.go:158] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0823 01:29:25.340179  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.342452  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.342577  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.342733  108223 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.342867  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.342885  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.342903  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.342903  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.342957  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.342992  108223 store.go:1342] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0823 01:29:25.343029  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.343164  108223 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.343358  108223 reflector.go:158] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0823 01:29:25.343768  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.343895  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.344100  108223 store.go:1342] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0823 01:29:25.344866  108223 master.go:434] Enabling API group "scheduling.k8s.io".
I0823 01:29:25.345181  108223 master.go:423] Skipping disabled API group "settings.k8s.io".
I0823 01:29:25.344713  108223 reflector.go:158] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0823 01:29:25.345429  108223 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.345987  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.346010  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.346032  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.346212  108223 store.go:1342] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0823 01:29:25.346259  108223 reflector.go:158] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0823 01:29:25.346508  108223 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.346673  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.347067  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.347233  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.347556  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.347871  108223 store.go:1342] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0823 01:29:25.348000  108223 reflector.go:158] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0823 01:29:25.348005  108223 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.348396  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.348422  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.348500  108223 store.go:1342] Monitoring csinodes.storage.k8s.io count at <storage-prefix>//csinodes
I0823 01:29:25.348535  108223 storage_factory.go:285] storing csidrivers.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.348631  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.348651  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.348715  108223 store.go:1342] Monitoring csidrivers.storage.k8s.io count at <storage-prefix>//csidrivers
I0823 01:29:25.348857  108223 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.348971  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.348979  108223 reflector.go:158] Listing and watching *storage.CSINode from storage/cacher.go:/csinodes
I0823 01:29:25.348994  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.349127  108223 store.go:1342] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0823 01:29:25.349252  108223 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.349479  108223 reflector.go:158] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0823 01:29:25.349282  108223 reflector.go:158] Listing and watching *storage.CSIDriver from storage/cacher.go:/csidrivers
I0823 01:29:25.349488  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.349563  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.349662  108223 store.go:1342] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0823 01:29:25.349678  108223 master.go:434] Enabling API group "storage.k8s.io".
I0823 01:29:25.349773  108223 reflector.go:158] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0823 01:29:25.349825  108223 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.349939  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.349954  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.350077  108223 store.go:1342] Monitoring deployments.apps count at <storage-prefix>//deployments
I0823 01:29:25.350223  108223 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.350319  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.350331  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.350423  108223 store.go:1342] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0823 01:29:25.350536  108223 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.350586  108223 reflector.go:158] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0823 01:29:25.350638  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.350652  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.350718  108223 reflector.go:158] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0823 01:29:25.350762  108223 store.go:1342] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0823 01:29:25.350870  108223 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.350895  108223 reflector.go:158] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0823 01:29:25.350959  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.350974  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.351096  108223 store.go:1342] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0823 01:29:25.351219  108223 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.351291  108223 reflector.go:158] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0823 01:29:25.351356  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.351370  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.351436  108223 store.go:1342] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0823 01:29:25.351448  108223 master.go:434] Enabling API group "apps".
I0823 01:29:25.351471  108223 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.351748  108223 reflector.go:158] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0823 01:29:25.351941  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.351959  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.355822  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.356147  108223 store.go:1342] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0823 01:29:25.356206  108223 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.356285  108223 reflector.go:158] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0823 01:29:25.356565  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.356591  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.358961  108223 store.go:1342] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0823 01:29:25.359082  108223 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.359207  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.359280  108223 reflector.go:158] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0823 01:29:25.359312  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.359205  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.359212  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.359371  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.359540  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.359577  108223 store.go:1342] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0823 01:29:25.359594  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.359597  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.359608  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.359612  108223 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.359720  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.359736  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.359829  108223 store.go:1342] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0823 01:29:25.359849  108223 master.go:434] Enabling API group "admissionregistration.k8s.io".
I0823 01:29:25.359878  108223 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.359894  108223 reflector.go:158] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0823 01:29:25.359914  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.359948  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.360161  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:25.360187  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:25.360354  108223 store.go:1342] Monitoring events count at <storage-prefix>//events
I0823 01:29:25.360364  108223 reflector.go:158] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0823 01:29:25.360374  108223 master.go:434] Enabling API group "events.k8s.io".
I0823 01:29:25.360460  108223 reflector.go:158] Listing and watching *core.Event from storage/cacher.go:/events
I0823 01:29:25.360695  108223 storage_factory.go:285] storing tokenreviews.authentication.k8s.io in authentication.k8s.io/v1, reading as authentication.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.360820  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.360898  108223 storage_factory.go:285] storing tokenreviews.authentication.k8s.io in authentication.k8s.io/v1, reading as authentication.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.363834  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.363839  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.363837  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.364667  108223 storage_factory.go:285] storing localsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.364976  108223 watch_cache.go:405] Replace watchCache (rev: 29828) 
I0823 01:29:25.366098  108223 storage_factory.go:285] storing selfsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.366316  108223 storage_factory.go:285] storing selfsubjectrulesreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.366471  108223 storage_factory.go:285] storing subjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.366712  108223 storage_factory.go:285] storing localsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.373280  108223 storage_factory.go:285] storing selfsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.373575  108223 storage_factory.go:285] storing selfsubjectrulesreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.373759  108223 storage_factory.go:285] storing subjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.374806  108223 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.375216  108223 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.376109  108223 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.376440  108223 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.377280  108223 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.377769  108223 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.379104  108223 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.379328  108223 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.389274  108223 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.389587  108223 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0823 01:29:25.389645  108223 genericapiserver.go:390] Skipping API batch/v2alpha1 because it has no resources.
I0823 01:29:25.396749  108223 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.397289  108223 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.397865  108223 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.399031  108223 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.400211  108223 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.401145  108223 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.401583  108223 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.404033  108223 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.404895  108223 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.410744  108223 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.411581  108223 storage_factory.go:285] storing runtimeclasses.node.k8s.io in node.k8s.io/v1beta1, reading as node.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0823 01:29:25.411747  108223 genericapiserver.go:390] Skipping API node.k8s.io/v1alpha1 because it has no resources.
I0823 01:29:25.412593  108223 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.412955  108223 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.413573  108223 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.414438  108223 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.415137  108223 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.415897  108223 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.417444  108223 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.418327  108223 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.419960  108223 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.426838  108223 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.427702  108223 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0823 01:29:25.427886  108223 genericapiserver.go:390] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
I0823 01:29:25.428674  108223 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.429441  108223 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0823 01:29:25.429615  108223 genericapiserver.go:390] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
I0823 01:29:25.430301  108223 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.431161  108223 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.431550  108223 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.432295  108223 storage_factory.go:285] storing csidrivers.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.433244  108223 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.435421  108223 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.442351  108223 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0823 01:29:25.442662  108223 genericapiserver.go:390] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
I0823 01:29:25.444127  108223 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.445345  108223 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.445770  108223 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.446565  108223 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.446945  108223 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.447323  108223 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.448193  108223 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.448556  108223 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.448957  108223 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.450604  108223 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.451784  108223 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.452267  108223 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0823 01:29:25.457668  108223 genericapiserver.go:390] Skipping API apps/v1beta2 because it has no resources.
W0823 01:29:25.457840  108223 genericapiserver.go:390] Skipping API apps/v1beta1 because it has no resources.
I0823 01:29:25.458742  108223 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.459568  108223 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.460490  108223 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.461302  108223 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.462177  108223 storage_factory.go:285] storing events.events.k8s.io in events.k8s.io/v1beta1, reading as events.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"1526c12d-f814-46c3-ae3d-307523f1a6fd", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0823 01:29:25.468214  108223 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.550635ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I0823 01:29:25.472027  108223 httplog.go:90] GET /api/v1/services: (1.120409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I0823 01:29:25.473303  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.473332  108223 healthz.go:169] healthz check poststarthook/bootstrap-controller failed: not finished
I0823 01:29:25.473343  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.473356  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.473365  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.473373  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.473399  108223 httplog.go:90] GET /healthz: (200.133µs) 0 [Go-http-client/1.1 127.0.0.1:41372]
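The /healthz responses logged above list every registered check as "[+]name ok" or "[-]name failed: reason withheld" and keep failing until all post-start hooks and the etcd client are ready. A minimal sketch of polling such an endpoint the same way the test harness does, assuming a hypothetical apiserver address (the real test server's port is chosen at random):

```go
// healthz_probe.go - sketch of polling an apiserver /healthz endpoint until it
// reports OK. The base URL below is an assumption; substitute the address of
// the apiserver under test.
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	const healthzURL = "http://127.0.0.1:8080/healthz?verbose" // hypothetical address
	for i := 0; i < 20; i++ {
		resp, err := http.Get(healthzURL)
		if err != nil {
			fmt.Println("healthz request failed:", err)
		} else {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			// With ?verbose (or on failure) the body lists each check with a
			// [+]/[-] marker, matching the log lines above.
			fmt.Printf("status %d\n%s\n", resp.StatusCode, body)
			if resp.StatusCode == http.StatusOK {
				return // all checks passed
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
}
```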
E0823 01:29:25.473790  108223 factory.go:594] Error getting pod permit-plugin414c9260-0725-463d-9fd2-e24ff1a64652/test-pod for retry: Get http://127.0.0.1:41583/api/v1/namespaces/permit-plugin414c9260-0725-463d-9fd2-e24ff1a64652/pods/test-pod: dial tcp 127.0.0.1:41583: connect: connection refused; retrying...
I0823 01:29:25.476825  108223 httplog.go:90] GET /api/v1/services: (1.061887ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I0823 01:29:25.479282  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.479312  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.479324  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.479334  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.479342  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.479366  108223 httplog.go:90] GET /healthz: (171.164µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41376]
I0823 01:29:25.490261  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (10.813037ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I0823 01:29:25.493897  108223 httplog.go:90] GET /api/v1/services: (3.506139ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:25.493945  108223 httplog.go:90] POST /api/v1/namespaces: (3.332205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I0823 01:29:25.494113  108223 httplog.go:90] GET /api/v1/services: (3.751021ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41376]
I0823 01:29:25.505934  108223 httplog.go:90] GET /api/v1/namespaces/kube-public: (10.98726ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I0823 01:29:25.511743  108223 httplog.go:90] POST /api/v1/namespaces: (5.190678ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:25.518180  108223 httplog.go:90] GET /api/v1/namespaces/kube-node-lease: (5.941795ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:25.521069  108223 httplog.go:90] POST /api/v1/namespaces: (2.35315ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:25.576197  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.576231  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.576245  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.576255  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.576263  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.576306  108223 httplog.go:90] GET /healthz: (282.919µs) 0 [Go-http-client/1.1 127.0.0.1:41378]
I0823 01:29:25.580434  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.580467  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.580479  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.580488  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.580496  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.580528  108223 httplog.go:90] GET /healthz: (260.975µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:25.674204  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.674241  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.674254  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.674265  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.674274  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.674307  108223 httplog.go:90] GET /healthz: (340.591µs) 0 [Go-http-client/1.1 127.0.0.1:41378]
I0823 01:29:25.680482  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.680528  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.680539  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.680548  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.680556  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.680590  108223 httplog.go:90] GET /healthz: (266.628µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:25.774429  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.774460  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.774472  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.774481  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.774491  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.774519  108223 httplog.go:90] GET /healthz: (232.265µs) 0 [Go-http-client/1.1 127.0.0.1:41378]
I0823 01:29:25.780418  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.780446  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.780458  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.780467  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.780475  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.780505  108223 httplog.go:90] GET /healthz: (238.875µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:25.874234  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.874267  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.874280  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.874290  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.874299  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.874329  108223 httplog.go:90] GET /healthz: (308.942µs) 0 [Go-http-client/1.1 127.0.0.1:41378]
I0823 01:29:25.880439  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.880481  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.880492  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.880502  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.880509  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.880541  108223 httplog.go:90] GET /healthz: (248.737µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:25.974142  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.974186  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.974199  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.974212  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.974220  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.974249  108223 httplog.go:90] GET /healthz: (275.865µs) 0 [Go-http-client/1.1 127.0.0.1:41378]
I0823 01:29:25.980475  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:25.980510  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:25.980523  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:25.980533  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:25.980541  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:25.980573  108223 httplog.go:90] GET /healthz: (248.571µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:26.074138  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:26.074182  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.074195  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:26.074206  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:26.074215  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:26.074245  108223 httplog.go:90] GET /healthz: (261.172µs) 0 [Go-http-client/1.1 127.0.0.1:41378]
I0823 01:29:26.080452  108223 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0823 01:29:26.080487  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.080502  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:26.080513  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:26.080521  108223 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:26.080554  108223 httplog.go:90] GET /healthz: (254.766µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:26.170202  108223 client.go:361] parsed scheme: "endpoint"
I0823 01:29:26.170306  108223 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I0823 01:29:26.176203  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.176231  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:26.176242  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:26.176250  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:26.176292  108223 httplog.go:90] GET /healthz: (2.373561ms) 0 [Go-http-client/1.1 127.0.0.1:41378]
I0823 01:29:26.181508  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.181537  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:26.181548  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:26.181556  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:26.181599  108223 httplog.go:90] GET /healthz: (1.294853ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:26.275113  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.275139  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:26.275152  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:26.275168  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:26.275210  108223 httplog.go:90] GET /healthz: (1.196305ms) 0 [Go-http-client/1.1 127.0.0.1:41378]
I0823 01:29:26.281474  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.281503  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:26.281513  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:26.281520  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:26.281557  108223 httplog.go:90] GET /healthz: (1.225512ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:26.375089  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.375115  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:26.375124  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:26.375131  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:26.375178  108223 httplog.go:90] GET /healthz: (1.166008ms) 0 [Go-http-client/1.1 127.0.0.1:41378]
I0823 01:29:26.383848  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.383878  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:26.383892  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:26.383901  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:26.383963  108223 httplog.go:90] GET /healthz: (1.089795ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:26.468375  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.537204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41376]
I0823 01:29:26.468858  108223 httplog.go:90] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-node-critical: (2.198172ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:26.469122  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.401109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.471421  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.799923ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41376]
I0823 01:29:26.472030  108223 httplog.go:90] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (2.127129ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:26.472263  108223 storage_scheduling.go:139] created PriorityClass system-node-critical with value 2000001000
I0823 01:29:26.473007  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (1.11453ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41376]
I0823 01:29:26.473223  108223 httplog.go:90] GET /api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication: (2.910696ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.475948  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (1.541858ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41526]
I0823 01:29:26.476277  108223 httplog.go:90] POST /api/v1/namespaces/kube-system/configmaps: (2.073146ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41376]
I0823 01:29:26.476684  108223 httplog.go:90] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-cluster-critical: (3.367927ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I0823 01:29:26.476817  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.476857  108223 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0823 01:29:26.476868  108223 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0823 01:29:26.476876  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0823 01:29:26.476908  108223 httplog.go:90] GET /healthz: (2.835191ms) 0 [Go-http-client/1.1 127.0.0.1:41524]
I0823 01:29:26.478638  108223 httplog.go:90] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.132418ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.478826  108223 storage_scheduling.go:139] created PriorityClass system-cluster-critical with value 2000000000
I0823 01:29:26.478854  108223 storage_scheduling.go:148] all system priority classes are created successfully or already exist.
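The scheduling bootstrap hook above follows a get-or-create pattern: a GET for each system priority class returns 404, then a POST creates it (system-node-critical at 2000001000, system-cluster-critical at 2000000000). A rough sketch of that pattern using current client-go signatures; the apiserver here serves scheduling.k8s.io/v1beta1, while the sketch uses the GA v1 client for brevity, and the clientset construction is assumed:

```go
// prioritybootstrap.go - sketch of the GET-404-then-POST-201 sequence visible
// in the log; not the actual storage_scheduling.go implementation.
package bootstrap

import (
	"context"
	"fmt"

	schedulingv1 "k8s.io/api/scheduling/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensurePriorityClass creates the named class only if it does not already
// exist, mirroring the "created PriorityClass ... with value ..." log lines.
func ensurePriorityClass(ctx context.Context, cs kubernetes.Interface, name string, value int32) error {
	_, err := cs.SchedulingV1().PriorityClasses().Get(ctx, name, metav1.GetOptions{})
	if err == nil {
		return nil // already exists
	}
	if !apierrors.IsNotFound(err) {
		return err
	}
	pc := &schedulingv1.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Value:      value,
	}
	_, err = cs.SchedulingV1().PriorityClasses().Create(ctx, pc, metav1.CreateOptions{})
	if err == nil {
		fmt.Printf("created PriorityClass %s with value %d\n", name, value)
	}
	return err
}
```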
I0823 01:29:26.481372  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (3.668777ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.482064  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.482092  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.482122  108223 httplog.go:90] GET /healthz: (1.94835ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.483172  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (831.255µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.486623  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (3.136172ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.488364  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (1.34144ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.489709  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (1.008226ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.491268  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin: (1.227434ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.493478  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.855828ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.493674  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/cluster-admin
I0823 01:29:26.495059  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:discovery: (1.230449ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.497166  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.749271ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.497344  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:discovery
I0823 01:29:26.498506  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:basic-user: (1.024107ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.500601  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.769542ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.500771  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:basic-user
I0823 01:29:26.501819  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:public-info-viewer: (916.392µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.503472  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.270781ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.503760  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:public-info-viewer
I0823 01:29:26.505188  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (975.464µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.507250  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.744684ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.507441  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/admin
I0823 01:29:26.509014  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (1.380264ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.511762  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.233626ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.511943  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/edit
I0823 01:29:26.515590  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (1.174185ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.518583  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.407534ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.518758  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/view
I0823 01:29:26.520285  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (984.175µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.524946  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.811119ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.525156  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-admin
I0823 01:29:26.526416  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (1.043443ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.529103  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.279069ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.529566  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-edit
I0823 01:29:26.530806  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (936.71µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.534825  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.632369ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.535250  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-view
I0823 01:29:26.536524  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:heapster: (1.083132ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.538992  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.56482ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.539366  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:heapster
I0823 01:29:26.542197  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node: (2.600375ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.544572  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.999521ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.544901  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node
I0823 01:29:26.545985  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-problem-detector: (859.579µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.547659  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.276591ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.547848  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node-problem-detector
I0823 01:29:26.548990  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-proxier: (949.585µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.550895  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.5902ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.551237  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node-proxier
I0823 01:29:26.552209  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kubelet-api-admin: (769.867µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.553968  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.47219ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.554265  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kubelet-api-admin
I0823 01:29:26.554997  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-bootstrapper: (587.202µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.556604  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.316145ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.556965  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node-bootstrapper
I0823 01:29:26.558010  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:auth-delegator: (813.596µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.559854  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.413895ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.560029  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:auth-delegator
I0823 01:29:26.561584  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-aggregator: (1.362334ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.563369  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.355691ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.563550  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-aggregator
I0823 01:29:26.564644  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-controller-manager: (896.252µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.566740  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.513668ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.566937  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-controller-manager
I0823 01:29:26.567960  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-dns: (845.86µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.571459  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.188873ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.571776  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-dns
I0823 01:29:26.572904  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:persistent-volume-provisioner: (975.025µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.574964  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.574986  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.575017  108223 httplog.go:90] GET /healthz: (1.171641ms) 0 [Go-http-client/1.1 127.0.0.1:41524]
I0823 01:29:26.575459  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.071269ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.575781  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:persistent-volume-provisioner
I0823 01:29:26.576751  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-attacher: (794.456µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.579694  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.32693ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.580113  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:csi-external-attacher
I0823 01:29:26.581207  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:nodeclient: (916.824µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.582018  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.582064  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.582095  108223 httplog.go:90] GET /healthz: (1.086898ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.583805  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.26548ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.583973  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:nodeclient
I0823 01:29:26.585320  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient: (1.123338ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.587367  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.452716ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.587539  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
I0823 01:29:26.589100  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:volume-scheduler: (1.283016ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.591319  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.660092ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.591465  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:volume-scheduler
I0823 01:29:26.592746  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-scheduler: (1.157653ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.595071  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.000182ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.595502  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-scheduler
I0823 01:29:26.596898  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-provisioner: (1.131486ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.599358  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.937802ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.599603  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:csi-external-provisioner
I0823 01:29:26.611960  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:attachdetach-controller: (12.191183ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.616137  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.437512ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.616514  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0823 01:29:26.617927  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:clusterrole-aggregation-controller: (1.071897ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.620120  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.76295ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.620572  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0823 01:29:26.622461  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:cronjob-controller: (1.501566ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.624606  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.739383ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.625060  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0823 01:29:26.626333  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:daemon-set-controller: (1.003094ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.628441  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.734198ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.628636  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0823 01:29:26.629732  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:deployment-controller: (928.766µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.633330  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.77812ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.633678  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:deployment-controller
I0823 01:29:26.635370  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:disruption-controller: (1.489364ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.637288  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.546927ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.637565  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:disruption-controller
I0823 01:29:26.639346  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:endpoint-controller: (1.411211ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.641681  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.924443ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.642012  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0823 01:29:26.643750  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:expand-controller: (1.388981ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.647374  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.182534ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.647825  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:expand-controller
I0823 01:29:26.649016  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:generic-garbage-collector: (793.819µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.651512  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.9706ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.651692  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0823 01:29:26.652689  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:horizontal-pod-autoscaler: (869.478µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.656466  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.797156ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.656927  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0823 01:29:26.659025  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:job-controller: (1.545393ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.662367  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.723852ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.662716  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:job-controller
I0823 01:29:26.663915  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:namespace-controller: (998.906µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.667376  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.811541ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.667824  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:namespace-controller
I0823 01:29:26.669461  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:node-controller: (1.144664ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.671908  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.932853ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.672284  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:node-controller
I0823 01:29:26.673265  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:persistent-volume-binder: (799.707µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.675538  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.601427ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.675685  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.675702  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.675730  108223 httplog.go:90] GET /healthz: (1.73342ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:26.676080  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0823 01:29:26.681879  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pod-garbage-collector: (5.578368ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.683199  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.683231  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.683259  108223 httplog.go:90] GET /healthz: (1.679463ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.686671  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.288421ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.686889  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0823 01:29:26.688878  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replicaset-controller: (1.095026ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.691526  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.020755ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.691743  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0823 01:29:26.693098  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replication-controller: (848.751µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.695614  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.956761ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.695833  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:replication-controller
I0823 01:29:26.696932  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:resourcequota-controller: (787.314µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.699107  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.694905ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.699417  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0823 01:29:26.701270  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:route-controller: (1.671425ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.703580  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.668413ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.703895  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:route-controller
I0823 01:29:26.713163  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-account-controller: (9.005699ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.715834  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.996939ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.731068  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:service-account-controller
I0823 01:29:26.732630  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-controller: (1.262952ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.735121  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.90426ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.735662  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:service-controller
I0823 01:29:26.738144  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:statefulset-controller: (1.627723ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.741286  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.620146ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.741796  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0823 01:29:26.742938  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:ttl-controller: (851.804µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.745474  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.67396ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.745851  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:ttl-controller
I0823 01:29:26.746890  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:certificate-controller: (859.375µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.749434  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.801804ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.749741  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:certificate-controller
I0823 01:29:26.750736  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pvc-protection-controller: (808.204µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.752781  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.412276ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.752970  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0823 01:29:26.754070  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pv-protection-controller: (886.571µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.769354  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.26985ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.769597  108223 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0823 01:29:26.776680  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.776802  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.777161  108223 httplog.go:90] GET /healthz: (3.231594ms) 0 [Go-http-client/1.1 127.0.0.1:41524]
I0823 01:29:26.781642  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.781819  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.781999  108223 httplog.go:90] GET /healthz: (1.546256ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.788633  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/cluster-admin: (1.526219ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.810282  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.927171ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.810672  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/cluster-admin
I0823 01:29:26.829293  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (2.175667ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.849829  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.616982ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.850719  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:discovery
I0823 01:29:26.868446  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:basic-user: (1.404354ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.876028  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.876080  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.876121  108223 httplog.go:90] GET /healthz: (2.213312ms) 0 [Go-http-client/1.1 127.0.0.1:41524]
I0823 01:29:26.883828  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.883873  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.883912  108223 httplog.go:90] GET /healthz: (2.531766ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.889371  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.370383ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:26.889600  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:basic-user
I0823 01:29:26.908410  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:public-info-viewer: (1.377357ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.929520  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.335963ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.929797  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:public-info-viewer
I0823 01:29:26.948655  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node-proxier: (1.404373ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.969412  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.300633ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.969859  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:node-proxier
I0823 01:29:26.975874  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.975898  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.975941  108223 httplog.go:90] GET /healthz: (2.000048ms) 0 [Go-http-client/1.1 127.0.0.1:41524]
I0823 01:29:26.981090  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:26.981136  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:26.981172  108223 httplog.go:90] GET /healthz: (876.142µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:26.988312  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-controller-manager: (1.287814ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.009602  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.456901ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.009848  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-controller-manager
I0823 01:29:27.028375  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-dns: (1.23305ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.050515  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.338663ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.050788  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-dns
I0823 01:29:27.070237  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-scheduler: (1.768625ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.076278  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.076318  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.076361  108223 httplog.go:90] GET /healthz: (2.082878ms) 0 [Go-http-client/1.1 127.0.0.1:41524]
I0823 01:29:27.081564  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.081594  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.081637  108223 httplog.go:90] GET /healthz: (1.332342ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.090528  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.673083ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.090806  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-scheduler
I0823 01:29:27.109643  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:volume-scheduler: (2.535072ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.129878  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.83173ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.130588  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:volume-scheduler
I0823 01:29:27.148273  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node: (1.177922ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.169465  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.759111ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:27.169733  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:node
I0823 01:29:27.185682  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.185719  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.185771  108223 httplog.go:90] GET /healthz: (5.107346ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.185869  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.185880  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.185904  108223 httplog.go:90] GET /healthz: (8.292309ms) 0 [Go-http-client/1.1 127.0.0.1:41524]
I0823 01:29:27.191776  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:attachdetach-controller: (2.702201ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.210725  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.26214ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.212860  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0823 01:29:27.228253  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:clusterrole-aggregation-controller: (1.169198ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.250390  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.311447ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.250646  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0823 01:29:27.269833  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:cronjob-controller: (2.681441ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.276310  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.276341  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.276379  108223 httplog.go:90] GET /healthz: (1.288642ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:27.283265  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.283299  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.283346  108223 httplog.go:90] GET /healthz: (2.871482ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.289458  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.335544ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.289661  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0823 01:29:27.309935  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:daemon-set-controller: (2.72931ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.329930  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.545297ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.330491  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0823 01:29:27.348493  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:deployment-controller: (1.366907ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.369554  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.43794ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.369874  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:deployment-controller
I0823 01:29:27.375322  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.375354  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.375401  108223 httplog.go:90] GET /healthz: (1.417521ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:27.382078  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.382133  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.382186  108223 httplog.go:90] GET /healthz: (1.550988ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.388303  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:disruption-controller: (1.283927ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.412587  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.101058ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.413262  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:disruption-controller
I0823 01:29:27.428753  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:endpoint-controller: (1.605139ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.449924  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.745205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.450436  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0823 01:29:27.468882  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:expand-controller: (1.778521ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.476353  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.476386  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.476424  108223 httplog.go:90] GET /healthz: (2.454599ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:27.481313  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.481343  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.481382  108223 httplog.go:90] GET /healthz: (1.071105ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.489869  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.788509ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.490090  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:expand-controller
I0823 01:29:27.508556  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:generic-garbage-collector: (1.417853ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.532077  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (4.889736ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.533478  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0823 01:29:27.548286  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:horizontal-pod-autoscaler: (1.235153ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.569292  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.164676ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.569517  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0823 01:29:27.575765  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.575805  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.575857  108223 httplog.go:90] GET /healthz: (1.868802ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:27.581855  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.581888  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.581940  108223 httplog.go:90] GET /healthz: (1.448524ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.588479  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:job-controller: (1.371294ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.609640  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.492097ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.609914  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:job-controller
I0823 01:29:27.628778  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:namespace-controller: (1.615871ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.650006  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.798207ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.650781  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:namespace-controller
I0823 01:29:27.669023  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:node-controller: (1.871199ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.675069  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.675102  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.675144  108223 httplog.go:90] GET /healthz: (1.182579ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:27.681417  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.681450  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.681490  108223 httplog.go:90] GET /healthz: (1.149091ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.689268  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.177022ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.689501  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:node-controller
I0823 01:29:27.708443  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:persistent-volume-binder: (1.327176ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.729946  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.813014ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.734287  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0823 01:29:27.748561  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pod-garbage-collector: (1.430643ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.769273  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.164608ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.769586  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0823 01:29:27.775060  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.775092  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.775132  108223 httplog.go:90] GET /healthz: (1.171118ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:27.781449  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.781489  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.781561  108223 httplog.go:90] GET /healthz: (1.277893ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.788605  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replicaset-controller: (1.502958ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.809623  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.45716ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.810138  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0823 01:29:27.828898  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replication-controller: (1.781678ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.852631  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.122135ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.852901  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replication-controller
I0823 01:29:27.868690  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:resourcequota-controller: (1.553022ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.875195  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.875230  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.875269  108223 httplog.go:90] GET /healthz: (1.275672ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:27.887457  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.887495  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.887538  108223 httplog.go:90] GET /healthz: (1.105565ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.897797  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.528126ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.898072  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0823 01:29:27.908593  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:route-controller: (1.392528ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.935989  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (7.29374ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.936472  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:route-controller
I0823 01:29:27.948593  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-account-controller: (1.467774ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.970252  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.804446ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.970544  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-account-controller
I0823 01:29:27.977498  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.977532  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.977574  108223 httplog.go:90] GET /healthz: (2.541145ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:27.981456  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:27.981484  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:27.981524  108223 httplog.go:90] GET /healthz: (1.270924ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:27.988526  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-controller: (1.411789ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.012788  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.640381ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.013103  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-controller
I0823 01:29:28.028194  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:statefulset-controller: (1.103542ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.059413  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (7.700101ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.059895  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0823 01:29:28.068841  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:ttl-controller: (1.720058ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.076347  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.076383  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.076437  108223 httplog.go:90] GET /healthz: (1.690066ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:28.081180  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.081208  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.081262  108223 httplog.go:90] GET /healthz: (954.669µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.089422  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.40447ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.089720  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:ttl-controller
I0823 01:29:28.108661  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:certificate-controller: (1.507472ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.129725  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.646839ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.130004  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:certificate-controller
I0823 01:29:28.148712  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pvc-protection-controller: (1.508989ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.169631  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.47586ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.170140  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0823 01:29:28.175169  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.175206  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.175249  108223 httplog.go:90] GET /healthz: (1.285307ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:28.181514  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.181543  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.181582  108223 httplog.go:90] GET /healthz: (1.281325ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.188148  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pv-protection-controller: (1.103309ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.209843  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.674073ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.210108  108223 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0823 01:29:28.228651  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/extension-apiserver-authentication-reader: (1.448162ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.231272  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.10776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.253730  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (6.113389ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.254023  108223 storage_rbac.go:278] created role.rbac.authorization.k8s.io/extension-apiserver-authentication-reader in kube-system
I0823 01:29:28.268651  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:bootstrap-signer: (1.520035ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.273228  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.109776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.275466  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.275494  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.275539  108223 httplog.go:90] GET /healthz: (1.613486ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:28.282559  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.282593  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.282647  108223 httplog.go:90] GET /healthz: (2.416406ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.289151  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.033272ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.289402  108223 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0823 01:29:28.308888  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:cloud-provider: (1.711691ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.316429  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.045762ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.331535  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (4.008754ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.332570  108223 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0823 01:29:28.357273  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:token-cleaner: (1.587391ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.360732  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (3.060418ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.369529  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.472122ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.369778  108223 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0823 01:29:28.376615  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.376644  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.376693  108223 httplog.go:90] GET /healthz: (2.31253ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:28.383894  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.383926  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.383968  108223 httplog.go:90] GET /healthz: (1.39288ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.391313  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-controller-manager: (2.230881ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.394606  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.605875ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.420805  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (13.693841ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.421234  108223 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0823 01:29:28.428552  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-scheduler: (1.53642ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.430618  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.662591ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.454564  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (7.424853ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.454880  108223 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0823 01:29:28.468716  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/system:controller:bootstrap-signer: (1.512982ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.471295  108223 httplog.go:90] GET /api/v1/namespaces/kube-public: (2.054396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.474704  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.474732  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.474765  108223 httplog.go:90] GET /healthz: (905.039µs) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:28.481587  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.481621  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.481668  108223 httplog.go:90] GET /healthz: (1.289608ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.492419  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles: (4.444844ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.492808  108223 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0823 01:29:28.508860  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::extension-apiserver-authentication-reader: (1.405165ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.511207  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.772453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.530540  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (1.72793ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.530749  108223 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system::extension-apiserver-authentication-reader in kube-system
I0823 01:29:28.548614  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-controller-manager: (1.472039ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.551888  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.042871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.569385  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.3062ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.569612  108223 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0823 01:29:28.575255  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.575298  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.575335  108223 httplog.go:90] GET /healthz: (1.366885ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:28.583005  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.583051  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.583089  108223 httplog.go:90] GET /healthz: (1.417689ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.591471  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-scheduler: (1.134312ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.593915  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.238544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.609763  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.188932ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.610152  108223 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0823 01:29:28.628675  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:bootstrap-signer: (1.438834ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.630804  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.21657ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.653243  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (6.085118ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.653609  108223 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0823 01:29:28.668896  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:cloud-provider: (1.786719ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.671715  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.273914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.675900  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.675924  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.675979  108223 httplog.go:90] GET /healthz: (2.032334ms) 0 [Go-http-client/1.1 127.0.0.1:41528]
I0823 01:29:28.682051  108223 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0823 01:29:28.682080  108223 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0823 01:29:28.682120  108223 httplog.go:90] GET /healthz: (1.854418ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.691619  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.897909ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.691912  108223 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0823 01:29:28.720330  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:token-cleaner: (13.214659ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.724402  108223 httplog.go:90] GET /api/v1/namespaces/kube-system: (3.118674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.729867  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.364311ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.730359  108223 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0823 01:29:28.753657  108223 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/system:controller:bootstrap-signer: (6.439201ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.756510  108223 httplog.go:90] GET /api/v1/namespaces/kube-public: (2.10444ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.769092  108223 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (1.961192ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.769344  108223 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0823 01:29:28.780717  108223 httplog.go:90] GET /healthz: (6.765407ms) 200 [Go-http-client/1.1 127.0.0.1:41528]
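Once the RBAC bootstrap rolebindings above are in place, the poststarthook/rbac/bootstrap-roles check clears and /healthz flips from the failing [-] listing to 200. A minimal sketch of probing the endpoint the same way, assuming a reachable apiserver address (the test talks to its own in-process server, not the hard-coded URL below):

```go
// Hedged sketch: probe the apiserver's /healthz endpoint. The base URL is an
// assumption for illustration; the integration test uses its in-process server.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// ?verbose asks for the per-check [+]/[-] listing seen in the log above.
	resp, err := http.Get("http://127.0.0.1:8080/healthz?verbose")
	if err != nil {
		fmt.Println("healthz request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("status %d\n%s", resp.StatusCode, body)
}
```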
W0823 01:29:28.781510  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781568  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781581  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781600  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781611  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781623  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781636  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781649  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781659  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781670  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0823 01:29:28.781727  108223 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
I0823 01:29:28.781752  108223 factory.go:294] Creating scheduler from algorithm provider 'DefaultProvider'
I0823 01:29:28.781762  108223 factory.go:382] Creating scheduler with fit predicates 'map[CheckNodeCondition:{} CheckNodeDiskPressure:{} CheckNodeMemoryPressure:{} CheckNodePIDPressure:{} CheckVolumeBinding:{} GeneralPredicates:{} MatchInterPodAffinity:{} MaxAzureDiskVolumeCount:{} MaxCSIVolumeCountPred:{} MaxEBSVolumeCount:{} MaxGCEPDVolumeCount:{} NoDiskConflict:{} NoVolumeZoneConflict:{} PodToleratesNodeTaints:{}]' and priority functions 'map[BalancedResourceAllocation:{} ImageLocalityPriority:{} InterPodAffinityPriority:{} LeastRequestedPriority:{} NodeAffinityPriority:{} NodePreferAvoidPodsPriority:{} SelectorSpreadPriority:{} TaintTolerationPriority:{}]'
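The predicate list above includes CheckNodePIDPressure, which is what this test exercises. A minimal sketch, not the upstream implementation, of how such a node-condition predicate can reject a node whose status reports PIDPressure=True:

```go
// Hedged sketch in the spirit of CheckNodePIDPressure: a node-condition check
// that filters out nodes reporting PIDPressure=True. Illustrative only.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// nodeHasPIDPressure reports whether the node's status carries a
// PIDPressure condition with status True.
func nodeHasPIDPressure(node *v1.Node) bool {
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodePIDPressure && cond.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	node := &v1.Node{
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{Type: v1.NodePIDPressure, Status: v1.ConditionTrue},
			},
		},
	}
	fmt.Println("node fits:", !nodeHasPIDPressure(node)) // node fits: false
}
```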
I0823 01:29:28.781979  108223 controller_utils.go:1029] Waiting for caches to sync for scheduler controller
I0823 01:29:28.782259  108223 reflector.go:120] Starting reflector *v1.Pod (12h0m0s) from k8s.io/kubernetes/test/integration/scheduler/util.go:230
I0823 01:29:28.782286  108223 reflector.go:158] Listing and watching *v1.Pod from k8s.io/kubernetes/test/integration/scheduler/util.go:230
I0823 01:29:28.783258  108223 httplog.go:90] GET /api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: (674.764µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:29:28.783443  108223 httplog.go:90] GET /healthz: (3.132911ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:28.784686  108223 get.go:250] Starting watch for /api/v1/pods, rv=29822 labels= fields=status.phase!=Failed,status.phase!=Succeeded timeout=6m42s
I0823 01:29:28.785476  108223 httplog.go:90] GET /api/v1/namespaces/default: (1.69596ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:28.789133  108223 httplog.go:90] POST /api/v1/namespaces: (3.326973ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:28.793963  108223 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (4.485547ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:28.798720  108223 httplog.go:90] POST /api/v1/namespaces/default/services: (4.273603ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:28.800578  108223 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.527163ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:28.802984  108223 httplog.go:90] POST /api/v1/namespaces/default/endpoints: (2.071815ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:28.882174  108223 shared_informer.go:211] caches populated
I0823 01:29:28.882203  108223 controller_utils.go:1036] Caches are synced for scheduler controller
I0823 01:29:28.882507  108223 reflector.go:120] Starting reflector *v1.StorageClass (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.882536  108223 reflector.go:158] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.882562  108223 reflector.go:120] Starting reflector *v1beta1.CSINode (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.882579  108223 reflector.go:158] Listing and watching *v1beta1.CSINode from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.882904  108223 reflector.go:120] Starting reflector *v1.PersistentVolume (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.882921  108223 reflector.go:158] Listing and watching *v1.PersistentVolume from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.883367  108223 reflector.go:120] Starting reflector *v1.PersistentVolumeClaim (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.883385  108223 reflector.go:158] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.883501  108223 httplog.go:90] GET /apis/storage.k8s.io/v1beta1/csinodes?limit=500&resourceVersion=0: (634.871µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:29:28.884275  108223 reflector.go:120] Starting reflector *v1.Node (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.884296  108223 reflector.go:158] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.884542  108223 httplog.go:90] GET /api/v1/persistentvolumes?limit=500&resourceVersion=0: (675.028µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41918]
I0823 01:29:28.884686  108223 reflector.go:120] Starting reflector *v1.ReplicationController (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.884701  108223 reflector.go:158] Listing and watching *v1.ReplicationController from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.885000  108223 httplog.go:90] GET /api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: (1.09595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41922]
I0823 01:29:28.885018  108223 reflector.go:120] Starting reflector *v1.ReplicaSet (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.885031  108223 reflector.go:158] Listing and watching *v1.ReplicaSet from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.885462  108223 reflector.go:120] Starting reflector *v1.StatefulSet (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.885478  108223 reflector.go:158] Listing and watching *v1.StatefulSet from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.885802  108223 reflector.go:120] Starting reflector *v1beta1.PodDisruptionBudget (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.885814  108223 reflector.go:158] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.886446  108223 httplog.go:90] GET /apis/apps/v1/replicasets?limit=500&resourceVersion=0: (512.873µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41924]
I0823 01:29:28.886753  108223 httplog.go:90] GET /api/v1/nodes?limit=500&resourceVersion=0: (440.159µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41918]
I0823 01:29:28.886992  108223 httplog.go:90] GET /api/v1/replicationcontrollers?limit=500&resourceVersion=0: (438.696µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41926]
I0823 01:29:28.887498  108223 get.go:250] Starting watch for /api/v1/persistentvolumes, rv=29821 labels= fields= timeout=6m9s
I0823 01:29:28.888460  108223 httplog.go:90] GET /apis/apps/v1/statefulsets?limit=500&resourceVersion=0: (469.846µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41932]
I0823 01:29:28.888472  108223 get.go:250] Starting watch for /apis/apps/v1/replicasets, rv=29828 labels= fields= timeout=7m7s
I0823 01:29:28.888822  108223 get.go:250] Starting watch for /api/v1/nodes, rv=29822 labels= fields= timeout=8m53s
I0823 01:29:28.888896  108223 get.go:250] Starting watch for /api/v1/replicationcontrollers, rv=29822 labels= fields= timeout=5m29s
I0823 01:29:28.889592  108223 httplog.go:90] GET /apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: (341.178µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41932]
I0823 01:29:28.889932  108223 get.go:250] Starting watch for /apis/apps/v1/statefulsets, rv=29828 labels= fields= timeout=5m43s
I0823 01:29:28.890139  108223 get.go:250] Starting watch for /apis/storage.k8s.io/v1/storageclasses, rv=29828 labels= fields= timeout=8m5s
I0823 01:29:28.890785  108223 get.go:250] Starting watch for /api/v1/persistentvolumeclaims, rv=29821 labels= fields= timeout=5m45s
I0823 01:29:28.891464  108223 httplog.go:90] GET /apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: (4.421938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41934]
I0823 01:29:28.891999  108223 reflector.go:120] Starting reflector *v1.Service (1s) from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.892020  108223 reflector.go:158] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:133
I0823 01:29:28.892380  108223 get.go:250] Starting watch for /apis/storage.k8s.io/v1beta1/csinodes, rv=29828 labels= fields= timeout=5m20s
I0823 01:29:28.892961  108223 get.go:250] Starting watch for /apis/policy/v1beta1/poddisruptionbudgets, rv=29827 labels= fields= timeout=7m3s
I0823 01:29:28.893301  108223 httplog.go:90] GET /api/v1/services?limit=500&resourceVersion=0: (579.136µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41934]
I0823 01:29:28.895149  108223 get.go:250] Starting watch for /api/v1/services, rv=30234 labels= fields= timeout=9m28s
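The reflector start-up above comes from a client-go shared informer factory with a 1s resync period, which is also what produces the once-per-second "forcing resync" lines later in this log. A minimal sketch under that assumption, using a fake clientset so it is self-contained rather than the test's own wiring:

```go
// Hedged sketch: a SharedInformerFactory with a 1s resync period, mirroring
// the "(1s)" reflectors started above. The fake clientset is an assumption
// for self-containment.
package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

func startInformers(cs kubernetes.Interface, stopCh <-chan struct{}) {
	// 1s resync matches the interval logged by the reflectors in this run.
	factory := informers.NewSharedInformerFactory(cs, 1*time.Second)

	// A few of the informer types the scheduler consumes, per the log above.
	_ = factory.Core().V1().Nodes().Informer()
	_ = factory.Core().V1().Pods().Informer()
	_ = factory.Storage().V1().StorageClasses().Informer()

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}

func main() {
	stopCh := make(chan struct{})
	defer close(stopCh)
	startInformers(fake.NewSimpleClientset(), stopCh)
}
```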
I0823 01:29:28.982871  108223 shared_informer.go:211] caches populated
I0823 01:29:29.083164  108223 shared_informer.go:211] caches populated
I0823 01:29:29.185600  108223 shared_informer.go:211] caches populated
I0823 01:29:29.286410  108223 shared_informer.go:211] caches populated
I0823 01:29:29.404426  108223 shared_informer.go:211] caches populated
I0823 01:29:29.504622  108223 shared_informer.go:211] caches populated
I0823 01:29:29.604761  108223 shared_informer.go:211] caches populated
I0823 01:29:29.704960  108223 shared_informer.go:211] caches populated
I0823 01:29:29.805116  108223 shared_informer.go:211] caches populated
I0823 01:29:29.886319  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:29.888787  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:29.890007  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:29.890566  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:29.891977  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:29.895346  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:29.905345  108223 shared_informer.go:211] caches populated
I0823 01:29:30.006359  108223 shared_informer.go:211] caches populated
I0823 01:29:30.009480  108223 httplog.go:90] POST /api/v1/nodes: (2.543691ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.010286  108223 node_tree.go:93] Added node "testnode" in group "" to NodeTree
I0823 01:29:30.012946  108223 httplog.go:90] PUT /api/v1/nodes/testnode/status: (2.458105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
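The PUT to /api/v1/nodes/testnode/status above is the test updating the node's status subresource. A hedged sketch of setting a PIDPressure condition the same way with client-go (fake clientset for self-containment; this is not the test's own helper):

```go
// Hedged sketch: add a PIDPressure condition to a node via the status
// subresource, mirroring the PUT /api/v1/nodes/testnode/status above.
package main

import (
	"context"
	"log"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

func markPIDPressure(cs kubernetes.Interface, nodeName string) error {
	node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
		Type:   v1.NodePIDPressure,
		Status: v1.ConditionTrue,
	})
	_, err = cs.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{})
	return err
}

func main() {
	cs := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "testnode"}})
	if err := markPIDPressure(cs, "testnode"); err != nil {
		log.Fatal(err)
	}
}
```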
I0823 01:29:30.015479  108223 httplog.go:90] POST /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods: (2.116545ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.015974  108223 scheduling_queue.go:830] About to try and schedule pod node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pidpressure-fake-name
I0823 01:29:30.015989  108223 scheduler.go:530] Attempting to schedule pod: node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pidpressure-fake-name
I0823 01:29:30.016144  108223 scheduler_binder.go:256] AssumePodVolumes for pod "node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pidpressure-fake-name", node "testnode"
I0823 01:29:30.016161  108223 scheduler_binder.go:266] AssumePodVolumes for pod "node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pidpressure-fake-name", node "testnode": all PVCs bound and nothing to do
I0823 01:29:30.016215  108223 factory.go:610] Attempting to bind pidpressure-fake-name to testnode
I0823 01:29:30.018915  108223 httplog.go:90] POST /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name/binding: (2.278942ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.019120  108223 scheduler.go:667] pod node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pidpressure-fake-name is bound successfully on node "testnode", 1 nodes evaluated, 1 nodes were found feasible. Bound node resource: "Capacity: CPU<0>|Memory<0>|Pods<32>|StorageEphemeral<0>; Allocatable: CPU<0>|Memory<0>|Pods<32>|StorageEphemeral<0>.".
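The POST to the pod's binding subresource above is how the scheduler commits its placement decision. A hedged sketch of issuing an equivalent Binding with client-go; the kubeconfig path, the namespace, and the exact Bind signature (which varies across client-go releases) are assumptions:

```go
// Hedged sketch: post a Binding for a pod, mirroring
// POST .../pods/pidpressure-fake-name/binding above. Not the scheduler's code.
package main

import (
	"context"
	"log"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a reachable kubeconfig; the integration test instead targets its
	// in-process apiserver.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	binding := &v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Name: "pidpressure-fake-name"},
		Target:     v1.ObjectReference{Kind: "Node", Name: "testnode"},
	}
	// "node-pid-pressure-example" is an illustrative namespace, not the
	// generated one from the log. Bind's signature here matches recent client-go.
	if err := cs.CoreV1().Pods("node-pid-pressure-example").Bind(
		context.TODO(), binding, metav1.CreateOptions{}); err != nil {
		log.Fatal(err)
	}
}
```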
I0823 01:29:30.021971  108223 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/events: (2.538201ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.117799  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.58858ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
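The long run of GETs on pidpressure-fake-name that begins here is the test polling the pod roughly every 100ms (the reflector started from test/integration/scheduler/util.go:230 above belongs to the same helper machinery). A minimal sketch of such a wait.Poll loop, assuming an already-constructed clientset; it mirrors the cadence, not the test's exact success condition:

```go
// Hedged sketch: poll a pod every 100ms until it is assigned to a node,
// producing the GET cadence seen in the log. Illustrative, not util.go itself.
package main

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// podScheduled returns a condition func that GETs the pod on every poll tick
// and reports whether it has been assigned to a node.
func podScheduled(cs kubernetes.Interface, namespace, name string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := cs.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			// Keep polling on transient errors.
			return false, nil
		}
		return pod.Spec.NodeName != "", nil
	}
}

func waitForPodToSchedule(cs kubernetes.Interface, pod *v1.Pod, timeout time.Duration) error {
	return wait.Poll(100*time.Millisecond, timeout, podScheduled(cs, pod.Namespace, pod.Name))
}
```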
I0823 01:29:30.217853  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.672138ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.317849  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.653665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.418153  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.666968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.519699  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.518748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.619103  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.93106ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.717810  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.541915ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.818052  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.898641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:30.886522  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:30.888969  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:30.890220  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:30.890966  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:30.892112  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:30.895600  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:30.919291  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.664828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.017962  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.666512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.117950  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.677823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.217891  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.742605ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.317763  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.615632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.418188  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.926716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.518817  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.817374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.618382  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.96958ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.717829  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.627212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.839586  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.672023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:31.886732  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:31.889126  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:31.890383  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:31.891102  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:31.892264  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:31.895774  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:31.918809  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.619654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:32.018694  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.522832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:32.118031  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.837237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:32.218338  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.145751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:32.318828  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.401002ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:32.418400  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.171332ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:32.518213  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.957769ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:32.624519  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (8.307106ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
E0823 01:29:32.666334  108223 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:41583/apis/events.k8s.io/v1beta1/namespaces/permit-plugin414c9260-0725-463d-9fd2-e24ff1a64652/events: dial tcp 127.0.0.1:41583: connect: connection refused' (may retry after sleeping)
I0823 01:29:32.718301  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.015231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:32.817973  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.748719ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:32.886939  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:32.889337  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:32.890567  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:32.891288  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:32.892439  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:32.895966  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:32.918160  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.867293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.018174  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.90155ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.117905  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.74269ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.218091  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.884414ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.394394  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (78.194579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.418080  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.808775ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.518212  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.926724ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.618136  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.880683ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.721281  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.77992ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.817658  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.493661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:33.887146  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:33.889553  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:33.890754  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:33.891437  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:33.892603  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:33.896122  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:33.920618  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (4.366237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.018281  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.029836ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.118159  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.971028ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.218172  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.952122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.318020  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.815446ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.417420  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.316476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.518128  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.902952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.618891  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.444041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.718223  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.919697ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.833185  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.438416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:34.887328  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:34.889776  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:34.890927  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:34.892920  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:34.892957  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:34.896284  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:34.920809  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.885489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.020969  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (4.611642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.117846  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.629184ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.217759  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.415141ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.318552  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.7238ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.418327  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.101492ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.518201  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.884999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.618005  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.77456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.717856  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.731414ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.817850  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.686244ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:35.887471  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:35.893176  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:35.893245  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:35.896505  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:35.897231  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:35.897276  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:35.918219  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.978879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.017991  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.824494ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.117950  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.746053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.218127  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.806486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.318003  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.834845ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.419443  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.019052ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.518195  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.900907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.617963  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.781405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.718540  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.305167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.822516  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (6.005937ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:36.887592  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:36.893410  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:36.893419  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:36.896805  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:36.897425  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:36.897521  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:36.918353  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.64291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.018297  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.089292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.117740  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.550898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.219347  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.092648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.320772  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.196055ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.418323  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.096623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.517915  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.667301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.626003  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.796393ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.718183  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.82068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.818164  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.980425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:37.889552  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:37.893519  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:37.893877  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:37.896993  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:37.897636  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:37.897676  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:37.918023  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.822056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.051780  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.54694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.118186  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.929649ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.218144  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.90518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.334935  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (18.685925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.417947  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.767379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.518101  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.799276ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.618090  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.776152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.717814  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.583995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.786191  108223 httplog.go:90] GET /api/v1/namespaces/default: (2.002431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.791470  108223 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (4.739177ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.793980  108223 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.974178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.822590  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.968477ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:38.889878  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:38.893696  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:38.894027  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:38.897399  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:38.897926  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:38.897942  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:38.917985  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.791913ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.018416  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.20234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.118611  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.220349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.218511  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.979267ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.318194  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.04039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.418018  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.827855ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.518127  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.861565ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.618032  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.840986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.719183  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.827777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.818354  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.112909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:39.890134  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:39.894087  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:39.894194  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:39.898125  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:39.898339  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:39.898458  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:39.917959  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.72033ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.018693  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.546146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.117843  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.627385ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.218123  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.870036ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.318027  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.772924ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.418157  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.924947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.518656  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.440053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.618013  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.833789ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.718445  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.200469ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.818071  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.768908ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:40.890278  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:40.894416  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:40.894473  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:40.898252  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:40.898487  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:40.898627  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:40.918266  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.036168ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.018007  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.805208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.118899  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.644408ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.218103  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.853798ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.318157  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.958566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.417888  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.658901ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.519071  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.897176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.617987  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.758985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.717836  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.670552ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.819207  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.885087ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:41.890401  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:41.894628  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:41.894685  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:41.898426  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:41.898708  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:41.898980  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:41.918276  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.06946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.018008  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.772155ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.118120  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.898242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.218189  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.970102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.318342  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.894885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.420179  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.883678ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.518305  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.086086ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.617768  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.554517ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.718302  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.981686ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.818655  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.300849ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:42.890614  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:42.894826  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:42.894879  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:42.898607  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:42.898897  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:42.899134  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:42.918489  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.233105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:43.018116  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.764574ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:43.117815  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.466885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:43.219292  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.135596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:43.319607  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.030563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:43.417712  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.534493ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
E0823 01:29:43.458009  108223 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:41583/apis/events.k8s.io/v1beta1/namespaces/permit-plugin414c9260-0725-463d-9fd2-e24ff1a64652/events: dial tcp 127.0.0.1:41583: connect: connection refused' (may retry after sleeping)
I0823 01:29:43.518333  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.143581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:43.619388  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.162784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:43.721989  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (5.809113ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:43.818222  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.025437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:43.890741  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:43.895009  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:43.895114  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:43.898787  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:43.899178  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:43.899404  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:43.917708  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.523369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.018396  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.848395ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.120351  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.740576ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.217789  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.589667ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.318286  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.02199ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.418695  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.07406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.517785  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.481662ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.617927  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.682864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.718363  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.103199ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.821945  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (5.711977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:44.890950  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:44.895282  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:44.895344  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:44.899003  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:44.899294  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:44.899609  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:44.918228  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.003531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.018165  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.962227ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.117986  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.747424ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.217890  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.700308ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.318570  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.288133ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.417999  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.60729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.517966  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.76293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.617928  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.704699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.717812  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.642903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.818095  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.914267ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:45.891115  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:45.895452  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:45.895504  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:45.899154  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:45.899361  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:45.899748  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:45.917614  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.449569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.018154  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.966729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.117818  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.620683ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.217635  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.497655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.317787  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.664014ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.417895  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.713379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.517973  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.636527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.617694  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.467285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.718435  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.642784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.818087  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.835325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:46.891859  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:46.896181  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:46.896225  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:46.899268  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:46.899510  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:46.899921  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:46.918315  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.989365ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.017750  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.568264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.118207  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.946073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.218249  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.89915ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.318546  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.282557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.418557  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.304222ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.518010  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.776777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.618006  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.836668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.718064  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.818427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.818284  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.958528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:47.891987  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:47.896357  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:47.896405  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:47.899450  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:47.899864  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:47.900102  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:47.918119  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.834671ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.018271  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.949665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.118312  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.099761ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.218094  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.902402ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.318054  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.816682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.418019  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.755138ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.525876  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.091985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.619588  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.769748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.718618  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.270116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.786138  108223 httplog.go:90] GET /api/v1/namespaces/default: (1.814943ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.788004  108223 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.428489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.789979  108223 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.582274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.818740  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.424109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:48.892205  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:48.896547  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:48.896547  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:48.899632  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:48.900065  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:48.900324  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:48.919097  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.815663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.018215  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.990221ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.117978  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.791122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.218699  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.489223ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.317911  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.704437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.418267  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.057009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.518380  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.068858ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.618196  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.977886ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.718247  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.910167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.818267  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.090387ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:49.892388  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:49.896736  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:49.896914  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:49.899838  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:49.900264  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:49.900502  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:49.918443  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.268429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.018711  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.049963ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.121081  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (4.827983ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.220203  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.791864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.318245  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.02382ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.418141  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.918861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.518138  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.903723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.618086  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.780959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.718000  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.764146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.818008  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.782145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:50.892625  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:50.896977  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:50.897090  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:50.900219  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:50.900577  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:50.900775  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:50.917835  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.659835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:51.018208  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.026261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
E0823 01:29:51.074315  108223 factory.go:594] Error getting pod permit-plugin414c9260-0725-463d-9fd2-e24ff1a64652/test-pod for retry: Get http://127.0.0.1:41583/api/v1/namespaces/permit-plugin414c9260-0725-463d-9fd2-e24ff1a64652/pods/test-pod: dial tcp 127.0.0.1:41583: connect: connection refused; retrying...
I0823 01:29:51.118116  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.584567ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:51.217936  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.64118ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:51.318814  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.029298ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:51.419532  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.317065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:51.518269  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.653228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:51.618150  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.95454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:51.718293  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.002732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:51.818327  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.140011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:51.892901  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:51.897124  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:51.898017  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:51.900406  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:51.900729  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:51.901124  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:51.918759  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.572039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.018152  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.986623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.117790  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.631421ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.217522  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.364959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.318824  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.684286ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.417900  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.718873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.518693  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.564795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.618072  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.842724ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.717632  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.431914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.818455  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.28504ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:52.893125  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:52.897292  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:52.898693  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:52.900502  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:52.900877  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:52.901655  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:52.917804  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.651396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:53.017934  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.722239ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:53.117771  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.584929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:53.218505  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.206861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:53.319058  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.691483ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:53.419854  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.404814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:53.518002  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.777673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
E0823 01:29:53.532932  108223 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:41583/apis/events.k8s.io/v1beta1/namespaces/permit-plugin414c9260-0725-463d-9fd2-e24ff1a64652/events: dial tcp 127.0.0.1:41583: connect: connection refused' (may retry after sleeping)
I0823 01:29:53.618452  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.645107ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:53.720594  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.598252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:53.818704  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.531549ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:53.893315  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:53.897516  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:53.898808  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:53.900780  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:53.901032  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:53.901854  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:53.918121  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.751149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.018301  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.023024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.118029  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.800299ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.218286  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.049026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.318017  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.784307ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.418426  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.168475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.518141  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.92328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.617771  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.620475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.718512  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.273875ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.818132  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.930165ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:54.898995  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:54.900910  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:54.901235  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:54.902031  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:54.913238  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:54.913293  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:54.920054  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.917421ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.017834  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.65397ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.117816  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.658094ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.218089  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.94399ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.317835  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.683297ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.418087  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.897074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.518232  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.957209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.618442  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.158656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.718334  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.178995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.817948  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.585217ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:55.899212  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:55.901008  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:55.901417  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:55.902317  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:55.913410  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:55.913456  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:55.917875  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.679857ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.019262  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.946783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.118023  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.791883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.217928  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.733489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.317959  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.748009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.418257  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.929774ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.518795  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.871487ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.620131  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (3.88623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.718257  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.018057ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.817921  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.702235ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:56.899416  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:56.901189  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:56.901594  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:56.902621  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:56.913581  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:56.913643  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:56.918124  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.861775ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.017912  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.754465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.118007  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.817607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.218385  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.942245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.317923  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.656007ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.418266  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.939781ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.518141  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.931523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.618168  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.945543ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.717963  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.751556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.818123  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.8599ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:57.899530  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:57.901354  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:57.901734  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:57.902845  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:57.913765  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:57.913814  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:57.918120  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.868109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.018281  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.056276ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.118383  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.034882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.218032  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.859684ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.318956  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.795173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.418312  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.089668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.518376  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.090941ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.618103  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.892783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.718485  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.959199ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.786346  108223 httplog.go:90] GET /api/v1/namespaces/default: (1.816099ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.788375  108223 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.462326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.790728  108223 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.903068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.819098  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.43025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:58.899716  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:58.901544  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:58.901879  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:58.903602  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:58.913949  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:58.913949  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:58.918245  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.885775ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.019910  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.441215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.117864  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.690865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.218275  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.094615ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.317939  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.67857ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.417941  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.693783ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.517989  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.737023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.618438  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.245349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.721768  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (2.019735ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.817930  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.710602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:29:59.899967  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:59.902250  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:59.902282  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:59.904477  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:59.914134  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:59.914183  108223 reflector.go:241] k8s.io/client-go/informers/factory.go:133: forcing resync
I0823 01:29:59.917907  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.741745ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:30:00.018081  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.580141ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:30:00.020551  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.75936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:30:00.038006  108223 httplog.go:90] DELETE /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (16.662805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:30:00.042205  108223 httplog.go:90] GET /api/v1/namespaces/node-pid-pressure22325fa8-1f63-48a5-bb11-554d5289ec35/pods/pidpressure-fake-name: (1.67366ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
E0823 01:30:00.043249  108223 scheduling_queue.go:833] Error while retrieving next pod from scheduling queue: scheduling queue is closed
I0823 01:30:00.043666  108223 httplog.go:90] GET /api/v1/replicationcontrollers?allowWatchBookmarks=true&resourceVersion=29822&timeout=5m29s&timeoutSeconds=329&watch=true: (31.155042013s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41926]
I0823 01:30:00.043817  108223 httplog.go:90] GET /apis/apps/v1/replicasets?allowWatchBookmarks=true&resourceVersion=29828&timeout=7m7s&timeoutSeconds=427&watch=true: (31.15563787s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41928]
I0823 01:30:00.043950  108223 httplog.go:90] GET /apis/apps/v1/statefulsets?allowWatchBookmarks=true&resourceVersion=29828&timeout=5m43s&timeoutSeconds=343&watch=true: (31.154314025s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41940]
I0823 01:30:00.044111  108223 httplog.go:90] GET /apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=29828&timeout=8m5s&timeoutSeconds=485&watch=true: (31.154201779s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41932]
I0823 01:30:00.044242  108223 httplog.go:90] GET /api/v1/persistentvolumeclaims?allowWatchBookmarks=true&resourceVersion=29821&timeout=5m45s&timeoutSeconds=345&watch=true: (31.153757008s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41922]
I0823 01:30:00.044358  108223 httplog.go:90] GET /apis/storage.k8s.io/v1beta1/csinodes?allowWatchBookmarks=true&resourceVersion=29828&timeout=5m20s&timeoutSeconds=320&watch=true: (31.152289674s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41524]
I0823 01:30:00.044472  108223 httplog.go:90] GET /apis/policy/v1beta1/poddisruptionbudgets?allowWatchBookmarks=true&resourceVersion=29827&timeout=7m3s&timeoutSeconds=423&watch=true: (31.151768043s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41948]
I0823 01:30:00.044581  108223 httplog.go:90] GET /api/v1/services?allowWatchBookmarks=true&resourceVersion=30234&timeout=9m28s&timeoutSeconds=568&watch=true: (31.149791189s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41934]
I0823 01:30:00.044693  108223 httplog.go:90] GET /api/v1/nodes?allowWatchBookmarks=true&resourceVersion=29822&timeout=8m53s&timeoutSeconds=533&watch=true: (31.156138247s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41924]
I0823 01:30:00.044820  108223 httplog.go:90] GET /api/v1/pods?allowWatchBookmarks=true&fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&resourceVersion=29822&timeoutSeconds=402&watch=true: (31.26047881s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41528]
I0823 01:30:00.047351  108223 httplog.go:90] GET /api/v1/persistentvolumes?allowWatchBookmarks=true&resourceVersion=29821&timeout=6m9s&timeoutSeconds=369&watch=true: (31.160197256s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41930]
I0823 01:30:00.053806  108223 httplog.go:90] DELETE /api/v1/nodes: (8.859555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:30:00.054105  108223 controller.go:176] Shutting down kubernetes service endpoint reconciler
I0823 01:30:00.055786  108223 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.400163ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
I0823 01:30:00.058083  108223 httplog.go:90] PUT /api/v1/namespaces/default/endpoints/kubernetes: (1.806582ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41950]
--- FAIL: TestNodePIDPressure (34.89s)
    predicates_test.go:924: Test Failed: error, timed out waiting for the condition, while waiting for scheduled

				from junit_eb089aee80105aff5db0557ae4449d31f19359f2_20190823-012127.xml
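The failure text is the generic timeout from the `wait` package ("timed out waiting for the condition"), and the GET requests for pidpressure-fake-name roughly every 100ms above are the test polling the pod until it gets scheduled. A minimal sketch of that polling pattern, not the actual predicates_test.go code, assuming a current client-go where Get takes a context:

```go
// Illustrative sketch: poll a pod until the scheduler binds it to a node.
// wait.Poll returns the "timed out waiting for the condition" error seen in
// the failure above when the condition never becomes true within the timeout.
package sched

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForPodScheduled(cs kubernetes.Interface, namespace, name string) error {
	return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// A pod counts as scheduled once the scheduler has set spec.nodeName.
		return pod.Spec.NodeName != "", nil
	})
}
```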


2810 passed tests and 4 skipped tests (lists omitted from this report)

Error lines from build-log.txt

... skipping 575 lines ...
W0823 01:16:08.419] I0823 01:16:08.417565   52841 graph_builder.go:282] GraphBuilder running
W0823 01:16:08.419] I0823 01:16:08.419085   52841 controllermanager.go:535] Started "attachdetach"
W0823 01:16:08.419] W0823 01:16:08.419101   52841 controllermanager.go:514] "tokencleaner" is disabled
W0823 01:16:08.419] I0823 01:16:08.419132   52841 attach_detach_controller.go:335] Starting attach detach controller
W0823 01:16:08.420] I0823 01:16:08.419151   52841 controller_utils.go:1029] Waiting for caches to sync for attach detach controller
W0823 01:16:08.420] I0823 01:16:08.419381   52841 node_lifecycle_controller.go:77] Sending events to api server
W0823 01:16:08.420] E0823 01:16:08.419419   52841 core.go:175] failed to start cloud node lifecycle controller: no cloud provider provided
W0823 01:16:08.421] W0823 01:16:08.419431   52841 controllermanager.go:527] Skipping "cloud-node-lifecycle"
W0823 01:16:08.421] I0823 01:16:08.419726   52841 controllermanager.go:535] Started "pvc-protection"
W0823 01:16:08.421] I0823 01:16:08.420321   52841 controllermanager.go:535] Started "deployment"
W0823 01:16:08.421] W0823 01:16:08.420343   52841 controllermanager.go:527] Skipping "ttl-after-finished"
W0823 01:16:08.422] I0823 01:16:08.420702   52841 pvc_protection_controller.go:101] Starting PVC protection controller
W0823 01:16:08.422] I0823 01:16:08.420733   52841 controller_utils.go:1029] Waiting for caches to sync for PVC protection controller
... skipping 49 lines ...
W0823 01:16:08.932] I0823 01:16:08.630066   52841 controllermanager.go:535] Started "namespace"
W0823 01:16:08.932] I0823 01:16:08.630255   52841 namespace_controller.go:186] Starting namespace controller
W0823 01:16:08.932] I0823 01:16:08.630284   52841 controller_utils.go:1029] Waiting for caches to sync for namespace controller
W0823 01:16:08.932] I0823 01:16:08.630468   52841 controllermanager.go:535] Started "disruption"
W0823 01:16:08.932] I0823 01:16:08.630733   52841 disruption.go:333] Starting disruption controller
W0823 01:16:08.933] I0823 01:16:08.630786   52841 controller_utils.go:1029] Waiting for caches to sync for disruption controller
W0823 01:16:08.933] E0823 01:16:08.630836   52841 core.go:78] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail
W0823 01:16:08.933] W0823 01:16:08.630847   52841 controllermanager.go:527] Skipping "service"
W0823 01:16:08.933] I0823 01:16:08.630856   52841 core.go:185] Will not configure cloud provider routes for allocate-node-cidrs: false, configure-cloud-routes: true.
W0823 01:16:08.933] W0823 01:16:08.630861   52841 controllermanager.go:527] Skipping "route"
W0823 01:16:08.933] I0823 01:16:08.631653   52841 controllermanager.go:535] Started "persistentvolume-expander"
W0823 01:16:08.933] I0823 01:16:08.631730   52841 expand_controller.go:301] Starting expand controller
W0823 01:16:08.934] I0823 01:16:08.631906   52841 controller_utils.go:1029] Waiting for caches to sync for expand controller
W0823 01:16:08.934] W0823 01:16:08.643552   52841 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="127.0.0.1" does not exist
W0823 01:16:08.934] I0823 01:16:08.702568   52841 controller_utils.go:1036] Caches are synced for certificate controller
W0823 01:16:08.934] I0823 01:16:08.709764   52841 controller_utils.go:1036] Caches are synced for TTL controller
W0823 01:16:08.934] I0823 01:16:08.710815   52841 controller_utils.go:1036] Caches are synced for ClusterRoleAggregator controller
W0823 01:16:08.934] E0823 01:16:08.721468   52841 clusterroleaggregation_controller.go:180] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again
W0823 01:16:08.935] E0823 01:16:08.722845   52841 clusterroleaggregation_controller.go:180] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again
W0823 01:16:08.935] I0823 01:16:08.730466   52841 controller_utils.go:1036] Caches are synced for namespace controller
W0823 01:16:08.935] I0823 01:16:08.798229   52841 controller_utils.go:1036] Caches are synced for endpoint controller
W0823 01:16:08.935] I0823 01:16:08.798781   52841 controller_utils.go:1036] Caches are synced for GC controller
W0823 01:16:08.935] I0823 01:16:08.801637   52841 controller_utils.go:1036] Caches are synced for HPA controller
W0823 01:16:08.935] I0823 01:16:08.804161   52841 controller_utils.go:1036] Caches are synced for taint controller
W0823 01:16:08.936] I0823 01:16:08.804256   52841 taint_manager.go:186] Starting NoExecuteTaintManager
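The "Waiting for caches to sync ..." / "Caches are synced ..." pairs above are the standard controller start-up handshake: each controller blocks until its informers have completed their initial LIST before processing work. A minimal sketch of that pattern with client-go (controller and informer names are illustrative):

```go
// Sketch of the "wait for caches to sync before running" pattern behind the
// controller-manager log lines above. Names are illustrative.
package controller

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func runController(cs kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(cs, 0)
	nodeInformer := factory.Core().V1().Nodes().Informer()
	factory.Start(stopCh)

	// Controllers log "Waiting for caches to sync" before this call and
	// "Caches are synced" once it returns true.
	if !cache.WaitForCacheSync(stopCh, nodeInformer.HasSynced) {
		return fmt.Errorf("timed out waiting for caches to sync")
	}

	// ... the controller's work loop would start here ...
	return nil
}
```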
... skipping 80 lines ...
I0823 01:16:12.374] +++ working dir: /go/src/k8s.io/kubernetes
I0823 01:16:12.378] +++ command: run_RESTMapper_evaluation_tests
I0823 01:16:12.390] +++ [0823 01:16:12] Creating namespace namespace-1566522972-25860
I0823 01:16:12.469] namespace/namespace-1566522972-25860 created
I0823 01:16:12.542] Context "test" modified.
I0823 01:16:12.551] +++ [0823 01:16:12] Testing RESTMapper
I0823 01:16:12.661] +++ [0823 01:16:12] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype"
I0823 01:16:12.680] +++ exit code: 0
I0823 01:16:12.809] NAME                              SHORTNAMES   APIGROUP                       NAMESPACED   KIND
I0823 01:16:12.810] bindings                                                                      true         Binding
I0823 01:16:12.810] componentstatuses                 cs                                          false        ComponentStatus
I0823 01:16:12.810] configmaps                        cm                                          true         ConfigMap
I0823 01:16:12.810] endpoints                         ep                                          true         Endpoints
... skipping 653 lines ...
I0823 01:16:34.890] (Bcore.sh:219: Successful get secrets --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:16:34.974] (Bsecret/test-secret created
I0823 01:16:35.071] core.sh:223: Successful get secret/test-secret --namespace=test-kubectl-describe-pod {{.metadata.name}}: test-secret
I0823 01:16:35.159] (Bcore.sh:224: Successful get secret/test-secret --namespace=test-kubectl-describe-pod {{.type}}: test-type
I0823 01:16:35.280] (Bcore.sh:229: Successful get configmaps --namespace=test-kubectl-describe-pod {{range.items}}{{ if eq .metadata.name \"test-configmap\" }}found{{end}}{{end}}:: :
I0823 01:16:35.396] (Bconfigmap/test-configmap created
W0823 01:16:35.497] error: resource(s) were provided, but no name, label selector, or --all flag specified
W0823 01:16:35.497] error: setting 'all' parameter but found a non empty selector. 
W0823 01:16:35.497] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0823 01:16:35.584] I0823 01:16:35.583946   49405 controller.go:606] quota admission added evaluator for: poddisruptionbudgets.policy
I0823 01:16:35.685] core.sh:235: Successful get configmap/test-configmap --namespace=test-kubectl-describe-pod {{.metadata.name}}: test-configmap
I0823 01:16:35.685] (Bpoddisruptionbudget.policy/test-pdb-1 created
I0823 01:16:35.689] core.sh:241: Successful get pdb/test-pdb-1 --namespace=test-kubectl-describe-pod {{.spec.minAvailable}}: 2
I0823 01:16:35.775] (Bpoddisruptionbudget.policy/test-pdb-2 created
I0823 01:16:35.883] core.sh:245: Successful get pdb/test-pdb-2 --namespace=test-kubectl-describe-pod {{.spec.minAvailable}}: 50%
I0823 01:16:35.963] (Bpoddisruptionbudget.policy/test-pdb-3 created
I0823 01:16:36.077] core.sh:251: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2
I0823 01:16:36.157] (Bpoddisruptionbudget.policy/test-pdb-4 created
I0823 01:16:36.290] core.sh:255: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50%
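The four test-pdb objects exercise the two mutually exclusive budget fields, minAvailable and maxUnavailable, each accepting either an absolute count or a percentage. A sketch of how those values look on the policy/v1beta1 API object (the selector is an illustrative assumption, not taken from the test):

```go
// Sketch: the spec fields behind {{.spec.minAvailable}} / {{.spec.maxUnavailable}}
// in the checks above. Only one of MinAvailable and MaxUnavailable may be set,
// which is why "min-available and max-unavailable cannot be both specified"
// appears a few lines further down.
package example

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func samplePDBs() []*policyv1beta1.PodDisruptionBudget {
	minAvailable := intstr.FromInt(2)          // renders as 2, like test-pdb-1
	maxUnavailable := intstr.FromString("50%") // renders as 50%, like test-pdb-4
	selector := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "example"}} // illustrative

	return []*policyv1beta1.PodDisruptionBudget{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "pdb-min"},
			Spec: policyv1beta1.PodDisruptionBudgetSpec{
				MinAvailable: &minAvailable,
				Selector:     selector,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "pdb-max"},
			Spec: policyv1beta1.PodDisruptionBudgetSpec{
				MaxUnavailable: &maxUnavailable,
				Selector:       selector,
			},
		},
	}
}
```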
I0823 01:16:36.495] (Bcore.sh:261: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:16:36.727] (Bpod/env-test-pod created
W0823 01:16:36.828] error: min-available and max-unavailable cannot be both specified
I0823 01:16:36.980] core.sh:264: Successful describe pods --namespace=test-kubectl-describe-pod env-test-pod:
I0823 01:16:36.981] Name:         env-test-pod
I0823 01:16:36.981] Namespace:    test-kubectl-describe-pod
I0823 01:16:36.981] Priority:     0
I0823 01:16:36.981] Node:         <none>
I0823 01:16:36.981] Labels:       <none>
... skipping 173 lines ...
I0823 01:16:50.675] (Bpod/valid-pod patched
I0823 01:16:50.769] core.sh:470: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: changed-with-yaml:
I0823 01:16:50.846] (Bpod/valid-pod patched
I0823 01:16:50.951] core.sh:475: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:3.1:
I0823 01:16:51.133] (Bpod/valid-pod patched
I0823 01:16:51.265] core.sh:491: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
I0823 01:16:51.449] (B+++ [0823 01:16:51] "kubectl patch with resourceVersion 500" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again
I0823 01:16:51.681] pod "valid-pod" deleted
I0823 01:16:51.693] pod/valid-pod replaced
I0823 01:16:51.798] core.sh:515: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname
I0823 01:16:51.971] (BSuccessful
I0823 01:16:51.972] message:error: --grace-period must have --force specified
I0823 01:16:51.972] has:\-\-grace-period must have \-\-force specified
I0823 01:16:52.145] Successful
I0823 01:16:52.145] message:error: --timeout must have --force specified
I0823 01:16:52.145] has:\-\-timeout must have \-\-force specified
I0823 01:16:52.371] node/node-v1-test created
W0823 01:16:52.472] W0823 01:16:52.371163   52841 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="node-v1-test" does not exist
I0823 01:16:52.573] node/node-v1-test replaced
I0823 01:16:52.647] core.sh:552: Successful get node node-v1-test {{.metadata.annotations.a}}: b
I0823 01:16:52.723] (Bnode "node-v1-test" deleted
I0823 01:16:52.835] core.sh:559: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
I0823 01:16:53.150] (Bcore.sh:562: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/serve_hostname:
I0823 01:16:54.187] (Bcore.sh:575: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
... skipping 25 lines ...
I0823 01:16:54.742] (Bcore.sh:593: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
I0823 01:16:54.832] (Bpod/valid-pod labeled
W0823 01:16:54.932] Edit cancelled, no changes made.
W0823 01:16:54.933] Edit cancelled, no changes made.
W0823 01:16:54.933] Edit cancelled, no changes made.
W0823 01:16:54.933] Edit cancelled, no changes made.
W0823 01:16:54.933] error: 'name' already has a value (valid-pod), and --overwrite is false
I0823 01:16:55.033] core.sh:597: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan
I0823 01:16:55.074] (Bcore.sh:601: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0823 01:16:55.162] (Bpod "valid-pod" force deleted
W0823 01:16:55.262] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0823 01:16:55.363] core.sh:605: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:16:55.363] (B+++ [0823 01:16:55] Creating namespace namespace-1566523015-20193
... skipping 82 lines ...
I0823 01:17:02.687] +++ Running case: test-cmd.run_kubectl_create_error_tests 
I0823 01:17:02.690] +++ working dir: /go/src/k8s.io/kubernetes
I0823 01:17:02.693] +++ command: run_kubectl_create_error_tests
I0823 01:17:02.705] +++ [0823 01:17:02] Creating namespace namespace-1566523022-7537
I0823 01:17:02.783] namespace/namespace-1566523022-7537 created
I0823 01:17:02.850] Context "test" modified.
I0823 01:17:02.857] +++ [0823 01:17:02] Testing kubectl create with error
W0823 01:17:02.958] Error: must specify one of -f and -k
W0823 01:17:02.958] 
W0823 01:17:02.958] Create a resource from a file or from stdin.
W0823 01:17:02.958] 
W0823 01:17:02.959]  JSON and YAML formats are accepted.
W0823 01:17:02.959] 
W0823 01:17:02.959] Examples:
... skipping 41 lines ...
W0823 01:17:02.965] 
W0823 01:17:02.965] Usage:
W0823 01:17:02.965]   kubectl create -f FILENAME [options]
W0823 01:17:02.965] 
W0823 01:17:02.966] Use "kubectl <command> --help" for more information about a given command.
W0823 01:17:02.966] Use "kubectl options" for a list of global command-line options (applies to all commands).
I0823 01:17:03.114] +++ [0823 01:17:03] "kubectl create with empty string list returns error as expected: error: error validating "hack/testdata/invalid-rc-with-empty-args.yaml": error validating data: ValidationError(ReplicationController.spec.template.spec.containers[0].args): unknown object type "nil" in ReplicationController.spec.template.spec.containers[0].args[0]; if you choose to ignore these errors, turn validation off with --validate=false
W0823 01:17:03.215] kubectl convert is DEPRECATED and will be removed in a future version.
W0823 01:17:03.215] In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
I0823 01:17:03.315] +++ exit code: 0
I0823 01:17:03.357] Recording: run_kubectl_apply_tests
I0823 01:17:03.358] Running command: run_kubectl_apply_tests
I0823 01:17:03.387] 
... skipping 16 lines ...
I0823 01:17:05.120] apply.sh:276: Successful get pods test-pod {{.metadata.labels.name}}: test-pod-label
I0823 01:17:05.202] (Bpod "test-pod" deleted
I0823 01:17:05.427] customresourcedefinition.apiextensions.k8s.io/resources.mygroup.example.com created
W0823 01:17:05.735] I0823 01:17:05.734800   49405 client.go:361] parsed scheme: "endpoint"
W0823 01:17:05.735] I0823 01:17:05.734876   49405 endpoint.go:66] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
W0823 01:17:05.738] I0823 01:17:05.738077   49405 controller.go:606] quota admission added evaluator for: resources.mygroup.example.com
W0823 01:17:05.836] Error from server (NotFound): resources.mygroup.example.com "myobj" not found
I0823 01:17:05.938] kind.mygroup.example.com/myobj serverside-applied (server dry run)
I0823 01:17:05.938] customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted
I0823 01:17:05.997] +++ exit code: 0
I0823 01:17:06.036] Recording: run_kubectl_run_tests
I0823 01:17:06.037] Running command: run_kubectl_run_tests
I0823 01:17:06.062] 
... skipping 97 lines ...
I0823 01:17:08.669] Context "test" modified.
I0823 01:17:08.676] +++ [0823 01:17:08] Testing kubectl create filter
I0823 01:17:08.763] create.sh:30: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:17:08.960] (Bpod/selector-test-pod created
I0823 01:17:09.056] create.sh:34: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
I0823 01:17:09.138] (BSuccessful
I0823 01:17:09.138] message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
I0823 01:17:09.138] has:pods "selector-test-pod-dont-apply" not found
I0823 01:17:09.215] pod "selector-test-pod" deleted
I0823 01:17:09.236] +++ exit code: 0
I0823 01:17:09.268] Recording: run_kubectl_apply_deployments_tests
I0823 01:17:09.269] Running command: run_kubectl_apply_deployments_tests
I0823 01:17:09.291] 
... skipping 29 lines ...
W0823 01:17:11.887] I0823 01:17:11.790546   52841 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1566523029-18852", Name:"nginx", UID:"689384fb-c2df-41e0-ac17-e0a4f15b9e7e", APIVersion:"apps/v1", ResourceVersion:"583", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-7dbc4d9f to 3
W0823 01:17:11.888] I0823 01:17:11.794956   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523029-18852", Name:"nginx-7dbc4d9f", UID:"5429ccd0-c875-4c2d-967f-85baa30bc3f2", APIVersion:"apps/v1", ResourceVersion:"584", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-7dbc4d9f-cgvr9
W0823 01:17:11.888] I0823 01:17:11.798114   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523029-18852", Name:"nginx-7dbc4d9f", UID:"5429ccd0-c875-4c2d-967f-85baa30bc3f2", APIVersion:"apps/v1", ResourceVersion:"584", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-7dbc4d9f-kknxz
W0823 01:17:11.889] I0823 01:17:11.800135   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523029-18852", Name:"nginx-7dbc4d9f", UID:"5429ccd0-c875-4c2d-967f-85baa30bc3f2", APIVersion:"apps/v1", ResourceVersion:"584", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-7dbc4d9f-75x2b
I0823 01:17:11.989] apps.sh:148: Successful get deployment nginx {{.metadata.name}}: nginx
I0823 01:17:16.153] (BSuccessful
I0823 01:17:16.154] message:Error from server (Conflict): error when applying patch:
I0823 01:17:16.155] {"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1566523029-18852\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}}
I0823 01:17:16.155] to:
I0823 01:17:16.155] Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment"
I0823 01:17:16.155] Name: "nginx", Namespace: "namespace-1566523029-18852"
I0823 01:17:16.160] Object: &{map["apiVersion":"apps/v1" "kind":"Deployment" "metadata":map["annotations":map["deployment.kubernetes.io/revision":"1" "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1566523029-18852\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx1\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx1\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"] "creationTimestamp":"2019-08-23T01:17:11Z" "generation":'\x01' "labels":map["name":"nginx"] "managedFields":[map["apiVersion":"apps/v1" "fields":map["f:metadata":map["f:annotations":map[".":map[] "f:kubectl.kubernetes.io/last-applied-configuration":map[]] "f:labels":map[".":map[] "f:name":map[]]] "f:spec":map["f:progressDeadlineSeconds":map[] "f:replicas":map[] "f:revisionHistoryLimit":map[] "f:selector":map["f:matchLabels":map[".":map[] "f:name":map[]]] "f:strategy":map["f:rollingUpdate":map[".":map[] "f:maxSurge":map[] "f:maxUnavailable":map[]] "f:type":map[]] "f:template":map["f:metadata":map["f:labels":map[".":map[] "f:name":map[]]] "f:spec":map["f:containers":map["k:{\"name\":\"nginx\"}":map[".":map[] "f:image":map[] "f:imagePullPolicy":map[] "f:name":map[] "f:ports":map[".":map[] "k:{\"containerPort\":80,\"protocol\":\"TCP\"}":map[".":map[] "f:containerPort":map[] "f:protocol":map[]]] "f:resources":map[] "f:terminationMessagePath":map[] "f:terminationMessagePolicy":map[]]] "f:dnsPolicy":map[] "f:restartPolicy":map[] "f:schedulerName":map[] "f:securityContext":map[] "f:terminationGracePeriodSeconds":map[]]]]] "manager":"kubectl" "operation":"Update" "time":"2019-08-23T01:17:11Z"] map["apiVersion":"apps/v1" "fields":map["f:metadata":map["f:annotations":map["f:deployment.kubernetes.io/revision":map[]]] "f:status":map["f:conditions":map[".":map[] "k:{\"type\":\"Available\"}":map[".":map[] "f:lastTransitionTime":map[] "f:lastUpdateTime":map[] "f:message":map[] "f:reason":map[] "f:status":map[] "f:type":map[]] "k:{\"type\":\"Progressing\"}":map[".":map[] "f:lastTransitionTime":map[] "f:lastUpdateTime":map[] "f:message":map[] "f:reason":map[] "f:status":map[] "f:type":map[]]] "f:observedGeneration":map[] "f:replicas":map[] "f:unavailableReplicas":map[] "f:updatedReplicas":map[]]] "manager":"kube-controller-manager" "operation":"Update" "time":"2019-08-23T01:17:11Z"]] "name":"nginx" "namespace":"namespace-1566523029-18852" "resourceVersion":"596" "selfLink":"/apis/apps/v1/namespaces/namespace-1566523029-18852/deployments/nginx" "uid":"689384fb-c2df-41e0-ac17-e0a4f15b9e7e"] "spec":map["progressDeadlineSeconds":'\u0258' "replicas":'\x03' "revisionHistoryLimit":'\n' "selector":map["matchLabels":map["name":"nginx1"]] "strategy":map["rollingUpdate":map["maxSurge":"25%" "maxUnavailable":"25%"] "type":"RollingUpdate"] "template":map["metadata":map["creationTimestamp":<nil> "labels":map["name":"nginx1"]] "spec":map["containers":[map["image":"k8s.gcr.io/nginx:test-cmd" "imagePullPolicy":"IfNotPresent" "name":"nginx" "ports":[map["containerPort":'P' "protocol":"TCP"]] "resources":map[] "terminationMessagePath":"/dev/termination-log" "terminationMessagePolicy":"File"]] "dnsPolicy":"ClusterFirst" "restartPolicy":"Always" "schedulerName":"default-scheduler" "securityContext":map[] "terminationGracePeriodSeconds":'\x1e']]] 
"status":map["conditions":[map["lastTransitionTime":"2019-08-23T01:17:11Z" "lastUpdateTime":"2019-08-23T01:17:11Z" "message":"Deployment does not have minimum availability." "reason":"MinimumReplicasUnavailable" "status":"False" "type":"Available"] map["lastTransitionTime":"2019-08-23T01:17:11Z" "lastUpdateTime":"2019-08-23T01:17:11Z" "message":"ReplicaSet \"nginx-7dbc4d9f\" is progressing." "reason":"ReplicaSetUpdated" "status":"True" "type":"Progressing"]] "observedGeneration":'\x01' "replicas":'\x03' "unavailableReplicas":'\x03' "updatedReplicas":'\x03']]}
I0823 01:17:16.160] for: "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again
I0823 01:17:16.160] has:Error from server (Conflict)
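This Conflict (like the earlier clusterrole aggregation conflicts) is the API server's optimistic-concurrency check: the applied manifest carried a stale resourceVersion ("99") while the live Deployment had already moved on. Go clients normally handle this by re-reading and retrying; a minimal sketch using client-go's retry helper, assuming a current client-go where Get/Update take a context (the deployment name and mutation are illustrative):

```go
// Sketch: the usual way Go clients handle "the object has been modified;
// please apply your changes to the latest version and try again".
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

func bumpReplicas(cs kubernetes.Interface, namespace, name string, replicas int32) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Re-read the latest object on each attempt so the update carries a
		// fresh resourceVersion instead of a stale one.
		d, err := cs.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		d.Spec.Replicas = &replicas
		_, err = cs.AppsV1().Deployments(namespace).Update(context.TODO(), d, metav1.UpdateOptions{})
		return err
	})
}
```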
W0823 01:17:17.039] I0823 01:17:17.038859   52841 horizontal.go:341] Horizontal Pod Autoscaler frontend has been deleted in namespace-1566523019-12562
I0823 01:17:21.478] deployment.apps/nginx configured
W0823 01:17:21.579] I0823 01:17:21.485398   52841 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1566523029-18852", Name:"nginx", UID:"9a1578bc-be10-46c7-bfb7-5978bc2b8c95", APIVersion:"apps/v1", ResourceVersion:"620", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-594f77b9f6 to 3
W0823 01:17:21.580] I0823 01:17:21.489593   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523029-18852", Name:"nginx-594f77b9f6", UID:"e1388d6b-afc7-4192-88b2-f79e4fa6b952", APIVersion:"apps/v1", ResourceVersion:"621", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-mqdxc
W0823 01:17:21.580] I0823 01:17:21.494820   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523029-18852", Name:"nginx-594f77b9f6", UID:"e1388d6b-afc7-4192-88b2-f79e4fa6b952", APIVersion:"apps/v1", ResourceVersion:"621", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-6bwxl
W0823 01:17:21.581] I0823 01:17:21.496264   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523029-18852", Name:"nginx-594f77b9f6", UID:"e1388d6b-afc7-4192-88b2-f79e4fa6b952", APIVersion:"apps/v1", ResourceVersion:"621", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-k8pkb
... skipping 178 lines ...
I0823 01:17:29.023] +++ [0823 01:17:29] Creating namespace namespace-1566523049-16444
I0823 01:17:29.107] namespace/namespace-1566523049-16444 created
I0823 01:17:29.183] Context "test" modified.
I0823 01:17:29.191] +++ [0823 01:17:29] Testing kubectl get
I0823 01:17:29.279] get.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:17:29.363] (BSuccessful
I0823 01:17:29.363] message:Error from server (NotFound): pods "abc" not found
I0823 01:17:29.363] has:pods "abc" not found
I0823 01:17:29.460] get.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:17:29.552] (BSuccessful
I0823 01:17:29.553] message:Error from server (NotFound): pods "abc" not found
I0823 01:17:29.553] has:pods "abc" not found
I0823 01:17:29.663] get.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:17:29.762] (BSuccessful
I0823 01:17:29.762] message:{
I0823 01:17:29.762]     "apiVersion": "v1",
I0823 01:17:29.763]     "items": [],
... skipping 23 lines ...
I0823 01:17:30.128] has not:No resources found
I0823 01:17:30.207] Successful
I0823 01:17:30.208] message:NAME
I0823 01:17:30.208] has not:No resources found
I0823 01:17:30.292] get.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:17:30.387] (BSuccessful
I0823 01:17:30.388] message:error: the server doesn't have a resource type "foobar"
I0823 01:17:30.388] has not:No resources found
I0823 01:17:30.480] Successful
I0823 01:17:30.480] message:No resources found in namespace-1566523049-16444 namespace.
I0823 01:17:30.481] has:No resources found
I0823 01:17:30.572] Successful
I0823 01:17:30.572] message:
I0823 01:17:30.573] has not:No resources found
I0823 01:17:30.662] Successful
I0823 01:17:30.662] message:No resources found in namespace-1566523049-16444 namespace.
I0823 01:17:30.662] has:No resources found
I0823 01:17:30.754] get.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:17:30.862] (BSuccessful
I0823 01:17:30.863] message:Error from server (NotFound): pods "abc" not found
I0823 01:17:30.863] has:pods "abc" not found
I0823 01:17:30.863] FAIL!
I0823 01:17:30.863] message:Error from server (NotFound): pods "abc" not found
I0823 01:17:30.863] has not:List
I0823 01:17:30.863] 99 /go/src/k8s.io/kubernetes/test/cmd/../../test/cmd/get.sh
I0823 01:17:31.007] Successful
I0823 01:17:31.007] message:I0823 01:17:30.945630   62775 loader.go:375] Config loaded from file:  /tmp/tmp.3axnv6CTxv/.kube/config
I0823 01:17:31.008] I0823 01:17:30.947211   62775 round_trippers.go:443] GET http://127.0.0.1:8080/version?timeout=32s 200 OK in 1 milliseconds
I0823 01:17:31.008] I0823 01:17:30.981205   62775 round_trippers.go:443] GET http://127.0.0.1:8080/api/v1/namespaces/default/pods 200 OK in 2 milliseconds
... skipping 660 lines ...
I0823 01:17:36.663] Successful
I0823 01:17:36.663] message:NAME    DATA   AGE
I0823 01:17:36.663] one     0      0s
I0823 01:17:36.664] three   0      0s
I0823 01:17:36.664] two     0      0s
I0823 01:17:36.664] STATUS    REASON          MESSAGE
I0823 01:17:36.664] Failure   InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0823 01:17:36.664] has not:watch is only supported on individual resources
I0823 01:17:37.752] Successful
I0823 01:17:37.753] message:STATUS    REASON          MESSAGE
I0823 01:17:37.753] Failure   InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0823 01:17:37.753] has not:watch is only supported on individual resources
I0823 01:17:37.757] +++ [0823 01:17:37] Creating namespace namespace-1566523057-14619
I0823 01:17:37.825] namespace/namespace-1566523057-14619 created
I0823 01:17:37.891] Context "test" modified.
I0823 01:17:37.982] get.sh:157: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:17:38.126] (Bpod/valid-pod created
... skipping 104 lines ...
I0823 01:17:38.221] }
I0823 01:17:38.306] get.sh:162: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0823 01:17:38.568] (B<no value>Successful
I0823 01:17:38.569] message:valid-pod:
I0823 01:17:38.569] has:valid-pod:
I0823 01:17:38.648] Successful
I0823 01:17:38.649] message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. Printing more information for debugging the template:
I0823 01:17:38.649] 	template was:
I0823 01:17:38.649] 		{.missing}
I0823 01:17:38.649] 	object given to jsonpath engine was:
I0823 01:17:38.651] 		map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2019-08-23T01:17:38Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fields":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:priority":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl", "operation":"Update", "time":"2019-08-23T01:17:38Z"}}, "name":"valid-pod", "namespace":"namespace-1566523057-14619", "resourceVersion":"698", "selfLink":"/api/v1/namespaces/namespace-1566523057-14619/pods/valid-pod", "uid":"977cb64b-40ba-46ff-9d40-e563ac381fc9"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"k8s.gcr.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}}
I0823 01:17:38.651] has:missing is not found
I0823 01:17:38.728] Successful
I0823 01:17:38.729] message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing". Printing more information for debugging the template:
I0823 01:17:38.729] 	template was:
I0823 01:17:38.729] 		{{.missing}}
I0823 01:17:38.729] 	raw data was:
I0823 01:17:38.731] 		{"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2019-08-23T01:17:38Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fields":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:priority":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl","operation":"Update","time":"2019-08-23T01:17:38Z"}],"name":"valid-pod","namespace":"namespace-1566523057-14619","resourceVersion":"698","selfLink":"/api/v1/namespaces/namespace-1566523057-14619/pods/valid-pod","uid":"977cb64b-40ba-46ff-9d40-e563ac381fc9"},"spec":{"containers":[{"image":"k8s.gcr.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}}
I0823 01:17:38.731] 	object given to template engine was:
I0823 01:17:38.732] 		map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2019-08-23T01:17:38Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fields:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:priority:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl operation:Update time:2019-08-23T01:17:38Z]] name:valid-pod namespace:namespace-1566523057-14619 resourceVersion:698 selfLink:/api/v1/namespaces/namespace-1566523057-14619/pods/valid-pod uid:977cb64b-40ba-46ff-9d40-e563ac381fc9] spec:map[containers:[map[image:k8s.gcr.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]]
I0823 01:17:38.732] has:map has no entry for key "missing"
W0823 01:17:38.833] error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing"
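The jsonpath and go-template failures above show what happens when the template references a key absent from the object: jsonpath reports "missing is not found", while the go-template printer surfaces Go's text/template "map has no entry for key" error when missing keys are not allowed. A small standard-library reproduction of the latter, assuming (as the error text suggests) that missing keys are treated as errors, i.e. text/template's missingkey=error option:

```go
// Reproduces the go-template error shape shown above using only the standard
// library. The data map is a stand-in for the pod object.
package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	tmpl := template.Must(
		template.New("output").Option("missingkey=error").Parse("{{.missing}}"),
	)
	data := map[string]interface{}{"apiVersion": "v1", "kind": "Pod"}

	if err := tmpl.Execute(os.Stdout, data); err != nil {
		// Prints something like:
		// template: output:1:2: executing "output" at <.missing>:
		//   map has no entry for key "missing"
		fmt.Println(err)
	}
}
```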
I0823 01:17:39.813] Successful
I0823 01:17:39.813] message:NAME        READY   STATUS    RESTARTS   AGE
I0823 01:17:39.813] valid-pod   0/1     Pending   0          0s
I0823 01:17:39.814] STATUS      REASON          MESSAGE
I0823 01:17:39.814] Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0823 01:17:39.814] has:STATUS
I0823 01:17:39.814] Successful
I0823 01:17:39.814] message:NAME        READY   STATUS    RESTARTS   AGE
I0823 01:17:39.815] valid-pod   0/1     Pending   0          0s
I0823 01:17:39.815] STATUS      REASON          MESSAGE
I0823 01:17:39.815] Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0823 01:17:39.815] has:valid-pod
I0823 01:17:40.895] Successful
I0823 01:17:40.895] message:pod/valid-pod
I0823 01:17:40.895] has not:STATUS
I0823 01:17:40.897] Successful
I0823 01:17:40.897] message:pod/valid-pod
... skipping 144 lines ...
I0823 01:17:41.998] status:
I0823 01:17:41.998]   phase: Pending
I0823 01:17:41.999]   qosClass: Guaranteed
I0823 01:17:41.999] ---
I0823 01:17:41.999] has:name: valid-pod
I0823 01:17:42.070] Successful
I0823 01:17:42.070] message:Error from server (NotFound): pods "invalid-pod" not found
I0823 01:17:42.070] has:"invalid-pod" not found
I0823 01:17:42.148] pod "valid-pod" deleted
I0823 01:17:42.246] get.sh:200: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:17:42.404] (Bpod/redis-master created
I0823 01:17:42.408] pod/valid-pod created
I0823 01:17:42.501] Successful
... skipping 35 lines ...
I0823 01:17:43.621] +++ command: run_kubectl_exec_pod_tests
I0823 01:17:43.632] +++ [0823 01:17:43] Creating namespace namespace-1566523063-7965
I0823 01:17:43.707] namespace/namespace-1566523063-7965 created
I0823 01:17:43.780] Context "test" modified.
I0823 01:17:43.789] +++ [0823 01:17:43] Testing kubectl exec POD COMMAND
I0823 01:17:43.871] Successful
I0823 01:17:43.871] message:Error from server (NotFound): pods "abc" not found
I0823 01:17:43.871] has:pods "abc" not found
I0823 01:17:44.032] pod/test-pod created
I0823 01:17:44.128] Successful
I0823 01:17:44.129] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0823 01:17:44.129] has not:pods "test-pod" not found
I0823 01:17:44.131] Successful
I0823 01:17:44.131] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0823 01:17:44.131] has not:pod or type/name must be specified
I0823 01:17:44.208] pod "test-pod" deleted
I0823 01:17:44.231] +++ exit code: 0
I0823 01:17:44.267] Recording: run_kubectl_exec_resource_name_tests
I0823 01:17:44.267] Running command: run_kubectl_exec_resource_name_tests
I0823 01:17:44.289] 
... skipping 2 lines ...
I0823 01:17:44.298] +++ command: run_kubectl_exec_resource_name_tests
I0823 01:17:44.310] +++ [0823 01:17:44] Creating namespace namespace-1566523064-25257
I0823 01:17:44.382] namespace/namespace-1566523064-25257 created
I0823 01:17:44.452] Context "test" modified.
I0823 01:17:44.460] +++ [0823 01:17:44] Testing kubectl exec TYPE/NAME COMMAND
I0823 01:17:44.564] Successful
I0823 01:17:44.564] message:error: the server doesn't have a resource type "foo"
I0823 01:17:44.564] has:error:
I0823 01:17:44.646] Successful
I0823 01:17:44.646] message:Error from server (NotFound): deployments.apps "bar" not found
I0823 01:17:44.647] has:"bar" not found
I0823 01:17:44.806] pod/test-pod created
I0823 01:17:44.978] replicaset.apps/frontend created
W0823 01:17:45.078] I0823 01:17:44.982321   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523064-25257", Name:"frontend", UID:"0c8e513e-b1f0-4cdd-936f-37058d7443f7", APIVersion:"apps/v1", ResourceVersion:"750", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-g8pvt
W0823 01:17:45.079] I0823 01:17:44.986531   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523064-25257", Name:"frontend", UID:"0c8e513e-b1f0-4cdd-936f-37058d7443f7", APIVersion:"apps/v1", ResourceVersion:"750", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-l4s77
W0823 01:17:45.079] I0823 01:17:44.986579   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523064-25257", Name:"frontend", UID:"0c8e513e-b1f0-4cdd-936f-37058d7443f7", APIVersion:"apps/v1", ResourceVersion:"750", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-n54cq
I0823 01:17:45.179] configmap/test-set-env-config created
I0823 01:17:45.263] Successful
I0823 01:17:45.264] message:error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented
I0823 01:17:45.264] has:not implemented
I0823 01:17:45.360] Successful
I0823 01:17:45.360] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0823 01:17:45.360] has not:not found
I0823 01:17:45.362] Successful
I0823 01:17:45.362] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0823 01:17:45.363] has not:pod or type/name must be specified
I0823 01:17:45.462] Successful
I0823 01:17:45.463] message:Error from server (BadRequest): pod frontend-g8pvt does not have a host assigned
I0823 01:17:45.463] has not:not found
I0823 01:17:45.464] Successful
I0823 01:17:45.465] message:Error from server (BadRequest): pod frontend-g8pvt does not have a host assigned
I0823 01:17:45.465] has not:pod or type/name must be specified
I0823 01:17:45.539] pod "test-pod" deleted
I0823 01:17:45.619] replicaset.apps "frontend" deleted
I0823 01:17:45.710] configmap "test-set-env-config" deleted
I0823 01:17:45.734] +++ exit code: 0
I0823 01:17:45.771] Recording: run_create_secret_tests
I0823 01:17:45.771] Running command: run_create_secret_tests
I0823 01:17:45.794] 
I0823 01:17:45.797] +++ Running case: test-cmd.run_create_secret_tests 
I0823 01:17:45.802] +++ working dir: /go/src/k8s.io/kubernetes
I0823 01:17:45.804] +++ command: run_create_secret_tests
I0823 01:17:45.912] Successful
I0823 01:17:45.912] message:Error from server (NotFound): secrets "mysecret" not found
I0823 01:17:45.913] has:secrets "mysecret" not found
I0823 01:17:46.081] Successful
I0823 01:17:46.082] message:Error from server (NotFound): secrets "mysecret" not found
I0823 01:17:46.082] has:secrets "mysecret" not found
I0823 01:17:46.084] Successful
I0823 01:17:46.084] message:user-specified
I0823 01:17:46.084] has:user-specified
I0823 01:17:46.159] Successful
I0823 01:17:46.254] {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","selfLink":"/api/v1/namespaces/default/configmaps/tester-update-cm","uid":"3338adcc-0adb-4c49-848b-9c295c848592","resourceVersion":"773","creationTimestamp":"2019-08-23T01:17:46Z"}}
... skipping 2 lines ...
I0823 01:17:46.422] has:uid
I0823 01:17:46.505] Successful
I0823 01:17:46.505] message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","selfLink":"/api/v1/namespaces/default/configmaps/tester-update-cm","uid":"3338adcc-0adb-4c49-848b-9c295c848592","resourceVersion":"774","creationTimestamp":"2019-08-23T01:17:46Z","managedFields":[{"manager":"kubectl","operation":"Update","apiVersion":"v1","time":"2019-08-23T01:17:46Z","fields":{"f:data":{"f:key1":{},".":{}}}}]},"data":{"key1":"config1"}}
I0823 01:17:46.505] has:config1
I0823 01:17:46.580] {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"3338adcc-0adb-4c49-848b-9c295c848592"}}
I0823 01:17:46.673] Successful
I0823 01:17:46.673] message:Error from server (NotFound): configmaps "tester-update-cm" not found
I0823 01:17:46.673] has:configmaps "tester-update-cm" not found
I0823 01:17:46.687] +++ exit code: 0
I0823 01:17:46.722] Recording: run_kubectl_create_kustomization_directory_tests
I0823 01:17:46.723] Running command: run_kubectl_create_kustomization_directory_tests
I0823 01:17:46.744] 
I0823 01:17:46.747] +++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests 
... skipping 158 lines ...
I0823 01:17:49.711] valid-pod   0/1     Pending   0          0s
I0823 01:17:49.711] has:valid-pod
I0823 01:17:50.795] Successful
I0823 01:17:50.796] message:NAME        READY   STATUS    RESTARTS   AGE
I0823 01:17:50.796] valid-pod   0/1     Pending   0          0s
I0823 01:17:50.796] STATUS      REASON          MESSAGE
I0823 01:17:50.796] Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0823 01:17:50.796] has:Timeout exceeded while reading body
I0823 01:17:50.883] Successful
I0823 01:17:50.883] message:NAME        READY   STATUS    RESTARTS   AGE
I0823 01:17:50.883] valid-pod   0/1     Pending   0          1s
I0823 01:17:50.883] has:valid-pod
I0823 01:17:50.952] Successful
I0823 01:17:50.953] message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)
I0823 01:17:50.953] has:Invalid timeout value
I0823 01:17:51.034] pod "valid-pod" deleted
I0823 01:17:51.068] +++ exit code: 0
I0823 01:17:51.113] Recording: run_crd_tests
I0823 01:17:51.113] Running command: run_crd_tests
I0823 01:17:51.142] 
... skipping 236 lines ...
I0823 01:17:56.417] foo.company.com/test patched
I0823 01:17:56.537] crd.sh:236: Successful get foos/test {{.patched}}: value1
I0823 01:17:56.646] (Bfoo.company.com/test patched
I0823 01:17:56.776] crd.sh:238: Successful get foos/test {{.patched}}: value2
I0823 01:17:56.892] (Bfoo.company.com/test patched
I0823 01:17:57.003] crd.sh:240: Successful get foos/test {{.patched}}: <no value>
I0823 01:17:57.220] (B+++ [0823 01:17:57] "kubectl patch --local" returns error as expected for CustomResource: error: cannot apply strategic merge patch for company.com/v1, Kind=Foo locally, try --type merge
I0823 01:17:57.295] {
I0823 01:17:57.296]     "apiVersion": "company.com/v1",
I0823 01:17:57.296]     "kind": "Foo",
I0823 01:17:57.296]     "metadata": {
I0823 01:17:57.296]         "annotations": {
I0823 01:17:57.296]             "kubernetes.io/change-cause": "kubectl patch foos/test --server=http://127.0.0.1:8080 --match-server-version=true --patch={\"patched\":null} --type=merge --record=true"
... skipping 294 lines ...
I0823 01:18:17.757] (Bnamespace/non-native-resources created
I0823 01:18:17.945] bar.company.com/test created
I0823 01:18:18.063] crd.sh:455: Successful get bars {{len .items}}: 1
I0823 01:18:18.150] (Bnamespace "non-native-resources" deleted
I0823 01:18:23.378] crd.sh:458: Successful get bars {{len .items}}: 0
I0823 01:18:23.574] (Bcustomresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted
W0823 01:18:23.674] Error from server (NotFound): namespaces "non-native-resources" not found
I0823 01:18:23.775] customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted
I0823 01:18:23.812] customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted
I0823 01:18:23.920] customresourcedefinition.apiextensions.k8s.io "validfoos.company.com" deleted
I0823 01:18:23.956] +++ exit code: 0
I0823 01:18:23.997] Recording: run_cmd_with_img_tests
I0823 01:18:23.997] Running command: run_cmd_with_img_tests
... skipping 10 lines ...
W0823 01:18:24.343] I0823 01:18:24.342609   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523104-26827", Name:"test1-9797f89d8", UID:"98c50c38-c2fa-428d-8f1c-6a379d5b1dd1", APIVersion:"apps/v1", ResourceVersion:"928", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test1-9797f89d8-sxqxx
I0823 01:18:24.448] Successful
I0823 01:18:24.448] message:deployment.apps/test1 created
I0823 01:18:24.448] has:deployment.apps/test1 created
I0823 01:18:24.459] deployment.apps "test1" deleted
I0823 01:18:24.551] Successful
I0823 01:18:24.551] message:error: Invalid image name "InvalidImageName": invalid reference format
I0823 01:18:24.551] has:error: Invalid image name "InvalidImageName": invalid reference format
I0823 01:18:24.569] +++ exit code: 0
I0823 01:18:24.621] +++ [0823 01:18:24] Testing recursive resources
I0823 01:18:24.629] +++ [0823 01:18:24] Creating namespace namespace-1566523104-25218
I0823 01:18:24.709] namespace/namespace-1566523104-25218 created
I0823 01:18:24.781] Context "test" modified.
I0823 01:18:24.887] generic-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:25.232] (Bgeneric-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:25.236] (BSuccessful
I0823 01:18:25.236] message:pod/busybox0 created
I0823 01:18:25.236] pod/busybox1 created
I0823 01:18:25.236] error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0823 01:18:25.236] has:error validating data: kind not set
I0823 01:18:25.333] generic-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:25.510] (Bgeneric-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox:
I0823 01:18:25.513] (BSuccessful
I0823 01:18:25.513] message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:25.513] has:Object 'Kind' is missing
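Every recursive-resource check in this part of the log ends with the same two failures because one fixture in the directory is deliberately broken: as the quoted JSON shows, it has "ind" where "kind" should be, so validation reports "kind not set" and the decoder reports "Object 'Kind' is missing". A tiny standalone illustration of why the decoder sees an empty Kind:

```go
// Sketch: decoding the broken fixture's type metadata. Because the JSON key is
// "ind" rather than "kind", the Kind field stays empty, which is what the
// Kubernetes decoder rejects with "Object 'Kind' is missing".
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	broken := []byte(`{"apiVersion":"v1","ind":"Pod","metadata":{"name":"busybox2"}}`)

	var typeMeta struct {
		APIVersion string `json:"apiVersion"`
		Kind       string `json:"kind"`
	}
	if err := json.Unmarshal(broken, &typeMeta); err != nil {
		panic(err)
	}
	fmt.Printf("apiVersion=%q kind=%q\n", typeMeta.APIVersion, typeMeta.Kind)
	// Output: apiVersion="v1" kind="" — the empty Kind triggers the error above.
}
```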
I0823 01:18:25.607] generic-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:25.883] (Bgeneric-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
I0823 01:18:25.887] (BSuccessful
I0823 01:18:25.887] message:pod/busybox0 replaced
I0823 01:18:25.887] pod/busybox1 replaced
I0823 01:18:25.888] error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0823 01:18:25.888] has:error validating data: kind not set
I0823 01:18:25.982] generic-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:26.088] (BSuccessful
I0823 01:18:26.089] message:Name:         busybox0
I0823 01:18:26.089] Namespace:    namespace-1566523104-25218
I0823 01:18:26.089] Priority:     0
I0823 01:18:26.089] Node:         <none>
... skipping 159 lines ...
I0823 01:18:26.104] has:Object 'Kind' is missing
I0823 01:18:26.196] generic-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:26.387] (Bgeneric-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue:
I0823 01:18:26.390] (BSuccessful
I0823 01:18:26.390] message:pod/busybox0 annotated
I0823 01:18:26.390] pod/busybox1 annotated
I0823 01:18:26.391] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:26.391] has:Object 'Kind' is missing
I0823 01:18:26.496] generic-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:26.767] (Bgeneric-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
I0823 01:18:26.771] (BSuccessful
I0823 01:18:26.771] message:Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
I0823 01:18:26.771] pod/busybox0 configured
I0823 01:18:26.771] Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
I0823 01:18:26.772] pod/busybox1 configured
I0823 01:18:26.772] error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0823 01:18:26.772] has:error validating data: kind not set
I0823 01:18:26.853] generic-resources.sh:265: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:26.991] (Bdeployment.apps/nginx created
I0823 01:18:27.089] generic-resources.sh:269: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx:
I0823 01:18:27.178] (Bgeneric-resources.sh:270: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
I0823 01:18:27.357] (Bgeneric-resources.sh:274: Successful get deployment nginx {{ .apiVersion }}: apps/v1
I0823 01:18:27.361] (BSuccessful
... skipping 42 lines ...
I0823 01:18:27.446] deployment.apps "nginx" deleted
I0823 01:18:27.549] generic-resources.sh:281: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:27.721] (Bgeneric-resources.sh:285: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:27.723] (BSuccessful
I0823 01:18:27.724] message:kubectl convert is DEPRECATED and will be removed in a future version.
I0823 01:18:27.724] In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
I0823 01:18:27.724] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:27.724] has:Object 'Kind' is missing
I0823 01:18:27.823] generic-resources.sh:290: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:27.926] (BSuccessful
I0823 01:18:27.927] message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:27.927] has:busybox0:busybox1:
I0823 01:18:27.928] Successful
I0823 01:18:27.929] message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:27.929] has:Object 'Kind' is missing
I0823 01:18:28.025] generic-resources.sh:299: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:28.119] (Bpod/busybox0 labeled
I0823 01:18:28.119] pod/busybox1 labeled
I0823 01:18:28.120] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:28.213] generic-resources.sh:304: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue:
I0823 01:18:28.216] (BSuccessful
I0823 01:18:28.216] message:pod/busybox0 labeled
I0823 01:18:28.216] pod/busybox1 labeled
I0823 01:18:28.217] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:28.217] has:Object 'Kind' is missing
I0823 01:18:28.309] generic-resources.sh:309: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:28.398] (Bpod/busybox0 patched
I0823 01:18:28.398] pod/busybox1 patched
I0823 01:18:28.399] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:28.501] generic-resources.sh:314: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox:
I0823 01:18:28.503] (BSuccessful
I0823 01:18:28.503] message:pod/busybox0 patched
I0823 01:18:28.503] pod/busybox1 patched
I0823 01:18:28.503] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:28.504] has:Object 'Kind' is missing
I0823 01:18:28.594] generic-resources.sh:319: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:28.765] (Bgeneric-resources.sh:323: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:28.767] (BSuccessful
I0823 01:18:28.768] message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0823 01:18:28.768] pod "busybox0" force deleted
I0823 01:18:28.768] pod "busybox1" force deleted
I0823 01:18:28.768] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0823 01:18:28.768] has:Object 'Kind' is missing
I0823 01:18:28.856] generic-resources.sh:328: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:29.003] (Breplicationcontroller/busybox0 created
I0823 01:18:29.007] replicationcontroller/busybox1 created
W0823 01:18:29.108] W0823 01:18:24.583282   49405 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0823 01:18:29.109] E0823 01:18:24.584740   52841 reflector.go:280] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.109] W0823 01:18:24.712633   49405 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0823 01:18:29.110] E0823 01:18:24.714141   52841 reflector.go:280] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.110] W0823 01:18:24.822657   49405 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0823 01:18:29.111] E0823 01:18:24.824377   52841 reflector.go:280] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.111] W0823 01:18:24.931114   49405 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0823 01:18:29.111] E0823 01:18:24.933735   52841 reflector.go:280] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.112] E0823 01:18:25.586346   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.112] E0823 01:18:25.715666   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.112] E0823 01:18:25.827007   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.113] E0823 01:18:25.934882   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.113] E0823 01:18:26.587460   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.114] E0823 01:18:26.717011   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.114] E0823 01:18:26.828060   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.114] E0823 01:18:26.936394   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.115] I0823 01:18:26.995631   52841 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1566523104-25218", Name:"nginx", UID:"45ea5436-5ddf-4b61-836e-e3448b5c90a5", APIVersion:"apps/v1", ResourceVersion:"953", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-bbbbb95b5 to 3
W0823 01:18:29.115] I0823 01:18:26.999529   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523104-25218", Name:"nginx-bbbbb95b5", UID:"c89a9003-75b7-43f3-b8d0-17fe97044736", APIVersion:"apps/v1", ResourceVersion:"954", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-bbbbb95b5-vzflx
W0823 01:18:29.116] I0823 01:18:27.003208   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523104-25218", Name:"nginx-bbbbb95b5", UID:"c89a9003-75b7-43f3-b8d0-17fe97044736", APIVersion:"apps/v1", ResourceVersion:"954", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-bbbbb95b5-2kdsn
W0823 01:18:29.116] I0823 01:18:27.004163   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523104-25218", Name:"nginx-bbbbb95b5", UID:"c89a9003-75b7-43f3-b8d0-17fe97044736", APIVersion:"apps/v1", ResourceVersion:"954", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-bbbbb95b5-2q4tg
W0823 01:18:29.116] kubectl convert is DEPRECATED and will be removed in a future version.
W0823 01:18:29.117] In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
W0823 01:18:29.117] E0823 01:18:27.588831   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.117] E0823 01:18:27.718120   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.118] E0823 01:18:27.829579   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.118] E0823 01:18:27.937667   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.118] E0823 01:18:28.589950   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.119] E0823 01:18:28.719268   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.119] E0823 01:18:28.831210   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.119] E0823 01:18:28.939368   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:29.120] I0823 01:18:29.007120   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523104-25218", Name:"busybox0", UID:"de9c8b6b-9449-432b-ad8f-f6685fe30a8e", APIVersion:"v1", ResourceVersion:"984", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-kfngd
W0823 01:18:29.120] error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
W0823 01:18:29.121] I0823 01:18:29.011721   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523104-25218", Name:"busybox1", UID:"30441aef-891f-45e5-a6b8-8e2c9d5d63c7", APIVersion:"v1", ResourceVersion:"986", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-vd4tq
I0823 01:18:29.221] generic-resources.sh:332: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:29.222] (Bgeneric-resources.sh:337: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:29.307] (Bgeneric-resources.sh:338: Successful get rc busybox0 {{.spec.replicas}}: 1
I0823 01:18:29.404] (Bgeneric-resources.sh:339: Successful get rc busybox1 {{.spec.replicas}}: 1
I0823 01:18:29.596] (Bgeneric-resources.sh:344: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80
I0823 01:18:29.682] (Bgeneric-resources.sh:345: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80
I0823 01:18:29.684] (BSuccessful
I0823 01:18:29.684] message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled
I0823 01:18:29.685] horizontalpodautoscaler.autoscaling/busybox1 autoscaled
I0823 01:18:29.685] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:29.685] has:Object 'Kind' is missing
I0823 01:18:29.756] horizontalpodautoscaler.autoscaling "busybox0" deleted
I0823 01:18:29.835] horizontalpodautoscaler.autoscaling "busybox1" deleted
I0823 01:18:29.928] generic-resources.sh:353: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:30.020] (Bgeneric-resources.sh:354: Successful get rc busybox0 {{.spec.replicas}}: 1
I0823 01:18:30.118] (Bgeneric-resources.sh:355: Successful get rc busybox1 {{.spec.replicas}}: 1
I0823 01:18:30.319] (Bgeneric-resources.sh:359: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
I0823 01:18:30.414] (Bgeneric-resources.sh:360: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
I0823 01:18:30.417] (BSuccessful
I0823 01:18:30.417] message:service/busybox0 exposed
I0823 01:18:30.417] service/busybox1 exposed
I0823 01:18:30.418] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:30.418] has:Object 'Kind' is missing
I0823 01:18:30.511] generic-resources.sh:366: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:30.607] (Bgeneric-resources.sh:367: Successful get rc busybox0 {{.spec.replicas}}: 1
I0823 01:18:30.695] (Bgeneric-resources.sh:368: Successful get rc busybox1 {{.spec.replicas}}: 1
I0823 01:18:30.891] (Bgeneric-resources.sh:372: Successful get rc busybox0 {{.spec.replicas}}: 2
I0823 01:18:30.987] (Bgeneric-resources.sh:373: Successful get rc busybox1 {{.spec.replicas}}: 2
I0823 01:18:30.990] (BSuccessful
I0823 01:18:30.990] message:replicationcontroller/busybox0 scaled
I0823 01:18:30.990] replicationcontroller/busybox1 scaled
I0823 01:18:30.991] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:30.991] has:Object 'Kind' is missing
I0823 01:18:31.081] generic-resources.sh:378: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:31.271] (Bgeneric-resources.sh:382: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:31.275] (BSuccessful
I0823 01:18:31.275] message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0823 01:18:31.275] replicationcontroller "busybox0" force deleted
I0823 01:18:31.276] replicationcontroller "busybox1" force deleted
I0823 01:18:31.276] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:31.276] has:Object 'Kind' is missing
I0823 01:18:31.362] generic-resources.sh:387: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:31.523] (Bdeployment.apps/nginx1-deployment created
I0823 01:18:31.528] deployment.apps/nginx0-deployment created
W0823 01:18:31.628] E0823 01:18:29.591226   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:31.629] E0823 01:18:29.720771   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:31.629] E0823 01:18:29.832487   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:31.629] E0823 01:18:29.940832   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:31.630] E0823 01:18:30.592934   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:31.630] E0823 01:18:30.726088   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:31.630] I0823 01:18:30.785084   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523104-25218", Name:"busybox0", UID:"de9c8b6b-9449-432b-ad8f-f6685fe30a8e", APIVersion:"v1", ResourceVersion:"1005", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-hrh5s
W0823 01:18:31.631] I0823 01:18:30.793155   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523104-25218", Name:"busybox1", UID:"30441aef-891f-45e5-a6b8-8e2c9d5d63c7", APIVersion:"v1", ResourceVersion:"1009", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-l8wqc
W0823 01:18:31.631] E0823 01:18:30.833912   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:31.632] E0823 01:18:30.942504   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:31.632] error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
W0823 01:18:31.632] I0823 01:18:31.529705   52841 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1566523104-25218", Name:"nginx1-deployment", UID:"2ff88015-a88f-468f-bd36-20061526b526", APIVersion:"apps/v1", ResourceVersion:"1025", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx1-deployment-84f7f49fb7 to 2
W0823 01:18:31.633] I0823 01:18:31.533196   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523104-25218", Name:"nginx1-deployment-84f7f49fb7", UID:"2d2ae199-e3d3-4afd-9d0e-7905b1c20e3e", APIVersion:"apps/v1", ResourceVersion:"1026", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx1-deployment-84f7f49fb7-hq7xk
W0823 01:18:31.633] I0823 01:18:31.534305   52841 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1566523104-25218", Name:"nginx0-deployment", UID:"53f4f4f2-9e02-459e-8611-2b9b39827ada", APIVersion:"apps/v1", ResourceVersion:"1027", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx0-deployment-57475bf54d to 2
W0823 01:18:31.634] I0823 01:18:31.535812   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523104-25218", Name:"nginx0-deployment-57475bf54d", UID:"c582042a-5f86-4600-b8d7-e616e6108e56", APIVersion:"apps/v1", ResourceVersion:"1028", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx0-deployment-57475bf54d-mj454
W0823 01:18:31.634] I0823 01:18:31.538576   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523104-25218", Name:"nginx1-deployment-84f7f49fb7", UID:"2d2ae199-e3d3-4afd-9d0e-7905b1c20e3e", APIVersion:"apps/v1", ResourceVersion:"1026", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx1-deployment-84f7f49fb7-mdqp4
W0823 01:18:31.635] I0823 01:18:31.543015   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523104-25218", Name:"nginx0-deployment-57475bf54d", UID:"c582042a-5f86-4600-b8d7-e616e6108e56", APIVersion:"apps/v1", ResourceVersion:"1028", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx0-deployment-57475bf54d-9znt4
W0823 01:18:31.635] E0823 01:18:31.594239   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:31.728] E0823 01:18:31.727229   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:31.828] generic-resources.sh:391: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment:
I0823 01:18:31.829] (Bgeneric-resources.sh:392: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9:
I0823 01:18:31.946] (Bgeneric-resources.sh:396: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9:
I0823 01:18:31.949] (BSuccessful
I0823 01:18:31.949] message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1)
I0823 01:18:31.949] deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1)
I0823 01:18:31.950] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0823 01:18:31.950] has:Object 'Kind' is missing
W0823 01:18:32.051] E0823 01:18:31.835374   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:32.051] E0823 01:18:31.943868   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:32.152] deployment.apps/nginx1-deployment paused
I0823 01:18:32.152] deployment.apps/nginx0-deployment paused
I0823 01:18:32.168] generic-resources.sh:404: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true:
I0823 01:18:32.171] (BSuccessful
I0823 01:18:32.171] message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0823 01:18:32.172] has:Object 'Kind' is missing
... skipping 9 lines ...
I0823 01:18:32.505] 1         <none>
I0823 01:18:32.505] 
I0823 01:18:32.505] deployment.apps/nginx0-deployment 
I0823 01:18:32.505] REVISION  CHANGE-CAUSE
I0823 01:18:32.505] 1         <none>
I0823 01:18:32.505] 
I0823 01:18:32.506] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0823 01:18:32.506] has:nginx0-deployment
I0823 01:18:32.506] Successful
I0823 01:18:32.507] message:deployment.apps/nginx1-deployment 
I0823 01:18:32.507] REVISION  CHANGE-CAUSE
I0823 01:18:32.507] 1         <none>
I0823 01:18:32.507] 
I0823 01:18:32.507] deployment.apps/nginx0-deployment 
I0823 01:18:32.507] REVISION  CHANGE-CAUSE
I0823 01:18:32.507] 1         <none>
I0823 01:18:32.507] 
I0823 01:18:32.507] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0823 01:18:32.508] has:nginx1-deployment
I0823 01:18:32.509] Successful
I0823 01:18:32.509] message:deployment.apps/nginx1-deployment 
I0823 01:18:32.509] REVISION  CHANGE-CAUSE
I0823 01:18:32.509] 1         <none>
I0823 01:18:32.509] 
I0823 01:18:32.509] deployment.apps/nginx0-deployment 
I0823 01:18:32.509] REVISION  CHANGE-CAUSE
I0823 01:18:32.509] 1         <none>
I0823 01:18:32.510] 
I0823 01:18:32.510] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0823 01:18:32.510] has:Object 'Kind' is missing
I0823 01:18:32.588] deployment.apps "nginx1-deployment" force deleted
I0823 01:18:32.593] deployment.apps "nginx0-deployment" force deleted
W0823 01:18:32.694] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0823 01:18:32.695] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
W0823 01:18:32.695] E0823 01:18:32.595869   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:32.730] E0823 01:18:32.729643   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:32.837] E0823 01:18:32.837136   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:32.945] E0823 01:18:32.945251   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:33.599] E0823 01:18:33.598541   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:33.699] generic-resources.sh:426: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:33.847] (Breplicationcontroller/busybox0 created
I0823 01:18:33.853] replicationcontroller/busybox1 created
W0823 01:18:33.954] E0823 01:18:33.731282   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:33.955] E0823 01:18:33.838583   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:33.955] I0823 01:18:33.851687   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523104-25218", Name:"busybox0", UID:"82cdb295-8a00-41ae-9401-3b2b964444c2", APIVersion:"v1", ResourceVersion:"1074", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-c8shx
W0823 01:18:33.956] error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
W0823 01:18:33.956] I0823 01:18:33.858686   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523104-25218", Name:"busybox1", UID:"cb447eae-7e2e-492f-82cc-8aa20fa69619", APIVersion:"v1", ResourceVersion:"1076", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-ptcb6
W0823 01:18:33.956] E0823 01:18:33.946848   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:34.057] generic-resources.sh:430: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0823 01:18:34.057] (BSuccessful
I0823 01:18:34.057] message:no rollbacker has been implemented for "ReplicationController"
I0823 01:18:34.057] no rollbacker has been implemented for "ReplicationController"
I0823 01:18:34.058] unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:34.058] has:no rollbacker has been implemented for "ReplicationController"
I0823 01:18:34.058] Successful
I0823 01:18:34.058] message:no rollbacker has been implemented for "ReplicationController"
I0823 01:18:34.058] no rollbacker has been implemented for "ReplicationController"
I0823 01:18:34.059] unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:34.059] has:Object 'Kind' is missing
I0823 01:18:34.150] Successful
I0823 01:18:34.151] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:34.151] error: replicationcontrollers "busybox0" pausing is not supported
I0823 01:18:34.151] error: replicationcontrollers "busybox1" pausing is not supported
I0823 01:18:34.151] has:Object 'Kind' is missing
I0823 01:18:34.153] Successful
I0823 01:18:34.153] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:34.153] error: replicationcontrollers "busybox0" pausing is not supported
I0823 01:18:34.154] error: replicationcontrollers "busybox1" pausing is not supported
I0823 01:18:34.154] has:replicationcontrollers "busybox0" pausing is not supported
I0823 01:18:34.155] Successful
I0823 01:18:34.155] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:34.156] error: replicationcontrollers "busybox0" pausing is not supported
I0823 01:18:34.156] error: replicationcontrollers "busybox1" pausing is not supported
I0823 01:18:34.156] has:replicationcontrollers "busybox1" pausing is not supported
I0823 01:18:34.257] Successful
I0823 01:18:34.257] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:34.258] error: replicationcontrollers "busybox0" resuming is not supported
I0823 01:18:34.258] error: replicationcontrollers "busybox1" resuming is not supported
I0823 01:18:34.258] has:Object 'Kind' is missing
I0823 01:18:34.259] Successful
I0823 01:18:34.260] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:34.260] error: replicationcontrollers "busybox0" resuming is not supported
I0823 01:18:34.260] error: replicationcontrollers "busybox1" resuming is not supported
I0823 01:18:34.261] has:replicationcontrollers "busybox0" resuming is not supported
I0823 01:18:34.262] Successful
I0823 01:18:34.263] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0823 01:18:34.263] error: replicationcontrollers "busybox0" resuming is not supported
I0823 01:18:34.263] error: replicationcontrollers "busybox1" resuming is not supported
I0823 01:18:34.264] has:replicationcontrollers "busybox0" resuming is not supported
I0823 01:18:34.344] replicationcontroller "busybox0" force deleted
I0823 01:18:34.349] replicationcontroller "busybox1" force deleted
W0823 01:18:34.450] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0823 01:18:34.451] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
W0823 01:18:34.600] E0823 01:18:34.599932   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:34.733] E0823 01:18:34.732540   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:34.841] E0823 01:18:34.840401   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:34.948] E0823 01:18:34.948248   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:35.358] Recording: run_namespace_tests
I0823 01:18:35.358] Running command: run_namespace_tests
I0823 01:18:35.384] 
I0823 01:18:35.388] +++ Running case: test-cmd.run_namespace_tests 
I0823 01:18:35.391] +++ working dir: /go/src/k8s.io/kubernetes
I0823 01:18:35.394] +++ command: run_namespace_tests
I0823 01:18:35.403] +++ [0823 01:18:35] Testing kubectl(v1:namespaces)
I0823 01:18:35.479] namespace/my-namespace created
I0823 01:18:35.580] core.sh:1308: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
I0823 01:18:35.659] (Bnamespace "my-namespace" deleted
W0823 01:18:35.760] E0823 01:18:35.601313   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:35.761] E0823 01:18:35.733760   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:35.842] E0823 01:18:35.841650   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:35.950] E0823 01:18:35.949761   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:36.603] E0823 01:18:36.602773   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:36.735] E0823 01:18:36.735236   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:36.843] E0823 01:18:36.843238   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:36.951] E0823 01:18:36.951333   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:37.605] E0823 01:18:37.604505   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:37.738] E0823 01:18:37.737191   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:37.845] E0823 01:18:37.845163   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:37.953] E0823 01:18:37.952940   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:38.607] E0823 01:18:38.606252   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:38.739] E0823 01:18:38.738865   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:38.847] E0823 01:18:38.847066   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:38.955] E0823 01:18:38.954429   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:39.608] E0823 01:18:39.607839   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:39.741] E0823 01:18:39.741305   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:39.849] E0823 01:18:39.848986   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:39.958] E0823 01:18:39.957577   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:40.610] E0823 01:18:40.609845   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:40.744] E0823 01:18:40.743206   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:40.844] namespace/my-namespace condition met
I0823 01:18:40.845] Successful
I0823 01:18:40.845] message:Error from server (NotFound): namespaces "my-namespace" not found
I0823 01:18:40.845] has: not found
I0823 01:18:40.923] namespace/my-namespace created
W0823 01:18:41.024] E0823 01:18:40.852565   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:41.025] E0823 01:18:40.959393   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:41.125] core.sh:1317: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
I0823 01:18:41.241] (BSuccessful
I0823 01:18:41.241] message:warning: deleting cluster-scoped resources, not scoped to the provided namespace
I0823 01:18:41.242] namespace "kube-node-lease" deleted
I0823 01:18:41.242] namespace "my-namespace" deleted
I0823 01:18:41.242] namespace "namespace-1566522969-27065" deleted
... skipping 27 lines ...
I0823 01:18:41.246] namespace "namespace-1566523067-20806" deleted
I0823 01:18:41.246] namespace "namespace-1566523069-14359" deleted
I0823 01:18:41.247] namespace "namespace-1566523071-6207" deleted
I0823 01:18:41.247] namespace "namespace-1566523072-23551" deleted
I0823 01:18:41.247] namespace "namespace-1566523104-25218" deleted
I0823 01:18:41.247] namespace "namespace-1566523104-26827" deleted
I0823 01:18:41.247] Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
I0823 01:18:41.248] Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
I0823 01:18:41.248] Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
I0823 01:18:41.248] has:warning: deleting cluster-scoped resources
I0823 01:18:41.248] Successful
I0823 01:18:41.248] message:warning: deleting cluster-scoped resources, not scoped to the provided namespace
I0823 01:18:41.248] namespace "kube-node-lease" deleted
I0823 01:18:41.248] namespace "my-namespace" deleted
I0823 01:18:41.249] namespace "namespace-1566522969-27065" deleted
... skipping 27 lines ...
I0823 01:18:41.253] namespace "namespace-1566523067-20806" deleted
I0823 01:18:41.253] namespace "namespace-1566523069-14359" deleted
I0823 01:18:41.253] namespace "namespace-1566523071-6207" deleted
I0823 01:18:41.253] namespace "namespace-1566523072-23551" deleted
I0823 01:18:41.254] namespace "namespace-1566523104-25218" deleted
I0823 01:18:41.254] namespace "namespace-1566523104-26827" deleted
I0823 01:18:41.254] Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
I0823 01:18:41.254] Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
I0823 01:18:41.254] Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
I0823 01:18:41.255] has:namespace "my-namespace" deleted
I0823 01:18:41.354] core.sh:1329: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"other\" }}found{{end}}{{end}}:: :
I0823 01:18:41.452] (Bnamespace/other created
W0823 01:18:41.552] I0823 01:18:41.526247   52841 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller
W0823 01:18:41.612] E0823 01:18:41.611693   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:41.627] I0823 01:18:41.626670   52841 controller_utils.go:1036] Caches are synced for garbage collector controller
W0823 01:18:41.634] I0823 01:18:41.633526   52841 controller_utils.go:1029] Waiting for caches to sync for resource quota controller
W0823 01:18:41.734] I0823 01:18:41.733801   52841 controller_utils.go:1036] Caches are synced for resource quota controller
W0823 01:18:41.744] E0823 01:18:41.744325   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:41.845] core.sh:1333: Successful get namespaces/other {{.metadata.name}}: other
I0823 01:18:41.845] (Bcore.sh:1337: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:41.845] (Bpod/valid-pod created
I0823 01:18:41.917] core.sh:1341: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0823 01:18:42.024] (Bcore.sh:1343: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0823 01:18:42.112] (BSuccessful
I0823 01:18:42.113] message:error: a resource cannot be retrieved by name across all namespaces
I0823 01:18:42.113] has:a resource cannot be retrieved by name across all namespaces
W0823 01:18:42.215] E0823 01:18:41.854858   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:42.216] E0823 01:18:41.963334   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:42.316] core.sh:1350: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0823 01:18:42.330] (Bpod "valid-pod" force deleted
W0823 01:18:42.430] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0823 01:18:42.531] core.sh:1354: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:42.552] (Bnamespace "other" deleted
W0823 01:18:42.653] E0823 01:18:42.612990   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:42.746] E0823 01:18:42.745562   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:42.857] E0823 01:18:42.856308   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:42.966] E0823 01:18:42.965103   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:43.615] E0823 01:18:43.614640   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:43.747] E0823 01:18:43.746870   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:43.858] E0823 01:18:43.857941   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:43.968] E0823 01:18:43.968071   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:44.485] I0823 01:18:44.484729   52841 horizontal.go:341] Horizontal Pod Autoscaler busybox0 has been deleted in namespace-1566523104-25218
W0823 01:18:44.490] I0823 01:18:44.490064   52841 horizontal.go:341] Horizontal Pod Autoscaler busybox1 has been deleted in namespace-1566523104-25218
W0823 01:18:44.616] E0823 01:18:44.616020   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:44.748] E0823 01:18:44.748267   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:44.860] E0823 01:18:44.859783   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:44.969] E0823 01:18:44.969283   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:45.618] E0823 01:18:45.617463   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:45.750] E0823 01:18:45.750146   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:45.862] E0823 01:18:45.861601   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:45.972] E0823 01:18:45.971549   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:46.623] E0823 01:18:46.620617   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:46.753] E0823 01:18:46.751993   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:46.864] E0823 01:18:46.862840   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:46.973] E0823 01:18:46.972690   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:47.652] E0823 01:18:47.652152   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:47.753] +++ exit code: 0
I0823 01:18:47.753] Recording: run_secrets_test
I0823 01:18:47.753] Running command: run_secrets_test
I0823 01:18:47.781] 
I0823 01:18:47.784] +++ Running case: test-cmd.run_secrets_test 
I0823 01:18:47.789] +++ working dir: /go/src/k8s.io/kubernetes
... skipping 35 lines ...
I0823 01:18:48.104]   key1: dmFsdWUx
I0823 01:18:48.104] kind: Secret
I0823 01:18:48.104] metadata:
I0823 01:18:48.104]   creationTimestamp: null
I0823 01:18:48.104]   name: test
I0823 01:18:48.105] has not:example.com
W0823 01:18:48.205] E0823 01:18:47.754101   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:48.205] E0823 01:18:47.864251   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:48.206] E0823 01:18:47.974821   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:48.206] I0823 01:18:48.085124   68910 loader.go:375] Config loaded from file:  /tmp/tmp.3axnv6CTxv/.kube/config
I0823 01:18:48.306] core.sh:725: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-secrets\" }}found{{end}}{{end}}:: :
I0823 01:18:48.307] namespace/test-secrets created
I0823 01:18:48.387] core.sh:729: Successful get namespaces/test-secrets {{.metadata.name}}: test-secrets
I0823 01:18:48.468] core.sh:733: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:48.555] (Bsecret/test-secret created
... skipping 11 lines ...
I0823 01:18:49.716] core.sh:767: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/tls
I0823 01:18:49.791] secret "test-secret" deleted
I0823 01:18:49.871] secret/test-secret created
I0823 01:18:49.975] core.sh:773: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret
I0823 01:18:50.081] core.sh:774: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/tls
I0823 01:18:50.160] secret "test-secret" deleted
W0823 01:18:50.261] E0823 01:18:48.653409   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:50.261] E0823 01:18:48.755456   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:50.261] E0823 01:18:48.865801   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:50.262] E0823 01:18:48.976251   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:50.262] E0823 01:18:49.654710   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:50.262] E0823 01:18:49.756753   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:50.262] E0823 01:18:49.867540   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:50.262] E0823 01:18:49.977816   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:50.363] secret/secret-string-data created
I0823 01:18:50.423] core.sh:796: Successful get secret/secret-string-data --namespace=test-secrets  {{.data}}: map[k1:djE= k2:djI=]
I0823 01:18:50.519] core.sh:797: Successful get secret/secret-string-data --namespace=test-secrets  {{.data}}: map[k1:djE= k2:djI=]
I0823 01:18:50.605] core.sh:798: Successful get secret/secret-string-data --namespace=test-secrets  {{.stringData}}: <no value>
I0823 01:18:50.682] secret "secret-string-data" deleted
I0823 01:18:50.777] core.sh:807: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:18:50.939] secret "test-secret" deleted
I0823 01:18:51.026] namespace "test-secrets" deleted
W0823 01:18:51.126] E0823 01:18:50.656356   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:51.127] E0823 01:18:50.758214   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:51.127] E0823 01:18:50.868706   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:51.128] E0823 01:18:50.979296   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:51.659] E0823 01:18:51.658368   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:51.760] E0823 01:18:51.759463   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:51.870] E0823 01:18:51.869950   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:51.981] E0823 01:18:51.980826   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:52.660] E0823 01:18:52.659778   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:52.761] E0823 01:18:52.760638   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:52.872] E0823 01:18:52.871429   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:52.982] E0823 01:18:52.982135   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:53.661] E0823 01:18:53.661220   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:53.762] E0823 01:18:53.761909   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:53.873] E0823 01:18:53.872806   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:53.984] E0823 01:18:53.983561   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:54.663] E0823 01:18:54.662699   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:54.764] E0823 01:18:54.763487   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:54.874] E0823 01:18:54.873998   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:54.985] E0823 01:18:54.984978   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:55.664] E0823 01:18:55.664019   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:55.765] E0823 01:18:55.764847   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:55.876] E0823 01:18:55.875457   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:55.986] E0823 01:18:55.986222   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:18:56.122] +++ exit code: 0
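For reference, the secret checks above (core.sh:733-807) cover a namespaced TLS secret and a string-data secret whose values show up base64-encoded under .data. A minimal sketch of the same flow, assuming placeholder certificate files; the test itself drives this through its own fixtures, and --from-literal is used here only to get an equivalent object:

  # TLS secret, asserted to have type kubernetes.io/tls (core.sh:767/774)
  kubectl create namespace test-secrets
  kubectl create secret tls test-secret --namespace=test-secrets --cert=tls.crt --key=tls.key
  # String-data secret; k1=v1 and k2=v2 encode to the djE=/djI= values checked by core.sh:796-797
  kubectl create secret generic secret-string-data --namespace=test-secrets \
      --from-literal=k1=v1 --from-literal=k2=v2
  kubectl get secret secret-string-data --namespace=test-secrets -o go-template='{{.data}}'
  kubectl delete namespace test-secrets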
I0823 01:18:56.158] Recording: run_configmap_tests
I0823 01:18:56.158] Running command: run_configmap_tests
I0823 01:18:56.182] 
I0823 01:18:56.184] +++ Running case: test-cmd.run_configmap_tests 
I0823 01:18:56.186] +++ working dir: /go/src/k8s.io/kubernetes
... skipping 14 lines ...
I0823 01:18:57.328] configmap/test-binary-configmap created
I0823 01:18:57.426] core.sh:48: Successful get configmap/test-configmap --namespace=test-configmaps {{.metadata.name}}: test-configmap
I0823 01:18:57.514] core.sh:49: Successful get configmap/test-binary-configmap --namespace=test-configmaps {{.metadata.name}}: test-binary-configmap
I0823 01:18:57.761] configmap "test-configmap" deleted
I0823 01:18:57.842] configmap "test-binary-configmap" deleted
I0823 01:18:57.925] namespace "test-configmaps" deleted
W0823 01:18:58.025] E0823 01:18:56.666575   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.026] E0823 01:18:56.766221   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.026] E0823 01:18:56.877464   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.026] E0823 01:18:56.987461   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.027] E0823 01:18:57.667839   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.027] E0823 01:18:57.767349   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.027] E0823 01:18:57.880302   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.027] E0823 01:18:57.988712   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.671] E0823 01:18:58.670160   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.769] E0823 01:18:58.768786   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.882] E0823 01:18:58.881642   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:58.990] E0823 01:18:58.990085   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:59.672] E0823 01:18:59.671691   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:59.771] E0823 01:18:59.770375   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:59.884] E0823 01:18:59.883428   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:18:59.992] E0823 01:18:59.991935   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:00.674] E0823 01:19:00.673530   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:00.772] E0823 01:19:00.772073   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:00.885] E0823 01:19:00.884756   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:00.994] E0823 01:19:00.993708   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:01.675] E0823 01:19:01.675193   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:01.774] E0823 01:19:01.774082   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:01.886] E0823 01:19:01.886306   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:01.996] E0823 01:19:01.995441   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:02.677] E0823 01:19:02.677140   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:02.777] E0823 01:19:02.775983   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:02.888] E0823 01:19:02.887941   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:02.997] E0823 01:19:02.996598   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:03.098] +++ exit code: 0
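The configmap case follows the same shape. A rough equivalent, with the namespace and object names taken from the log and the data keys and file name as placeholders:

  kubectl create namespace test-configmaps
  kubectl create configmap test-configmap --namespace=test-configmaps --from-literal=key=value
  # test-binary-configmap carries binary payloads; --from-file with an arbitrary binary file is one way to build it
  kubectl create configmap test-binary-configmap --namespace=test-configmaps --from-file=payload.bin
  kubectl get configmap/test-configmap --namespace=test-configmaps -o go-template='{{.metadata.name}}'
  kubectl delete namespace test-configmaps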
I0823 01:19:03.118] Recording: run_client_config_tests
I0823 01:19:03.119] Running command: run_client_config_tests
I0823 01:19:03.154] 
I0823 01:19:03.158] +++ Running case: test-cmd.run_client_config_tests 
I0823 01:19:03.161] +++ working dir: /go/src/k8s.io/kubernetes
I0823 01:19:03.165] +++ command: run_client_config_tests
I0823 01:19:03.180] +++ [0823 01:19:03] Creating namespace namespace-1566523143-31642
I0823 01:19:03.263] namespace/namespace-1566523143-31642 created
I0823 01:19:03.345] Context "test" modified.
I0823 01:19:03.355] +++ [0823 01:19:03] Testing client config
I0823 01:19:03.433] Successful
I0823 01:19:03.434] message:error: stat missing: no such file or directory
I0823 01:19:03.434] has:missing: no such file or directory
I0823 01:19:03.515] Successful
I0823 01:19:03.515] message:error: stat missing: no such file or directory
I0823 01:19:03.515] has:missing: no such file or directory
I0823 01:19:03.593] Successful
I0823 01:19:03.594] message:error: stat missing: no such file or directory
I0823 01:19:03.594] has:missing: no such file or directory
I0823 01:19:03.669] Successful
I0823 01:19:03.670] message:Error in configuration: context was not found for specified context: missing-context
I0823 01:19:03.670] has:context was not found for specified context: missing-context
I0823 01:19:03.759] Successful
I0823 01:19:03.760] message:error: no server found for cluster "missing-cluster"
I0823 01:19:03.760] has:no server found for cluster "missing-cluster"
I0823 01:19:03.841] Successful
I0823 01:19:03.842] message:error: auth info "missing-user" does not exist
I0823 01:19:03.842] has:auth info "missing-user" does not exist
W0823 01:19:03.943] E0823 01:19:03.679238   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:03.943] E0823 01:19:03.777548   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:03.944] E0823 01:19:03.889198   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:03.999] E0823 01:19:03.998271   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:04.100] Successful
I0823 01:19:04.100] message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
I0823 01:19:04.100] has:error loading config file
I0823 01:19:04.100] Successful
I0823 01:19:04.101] message:error: stat missing-config: no such file or directory
I0823 01:19:04.101] has:no such file or directory
I0823 01:19:04.119] +++ exit code: 0
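The client-config failures above are negative tests: each invocation points kubectl at configuration that does not exist and asserts on the error text. Roughly, using any server-reaching subcommand such as get (the exact subcommand the test uses is not visible in the log):

  kubectl get pods --kubeconfig=missing            # error: stat missing: no such file or directory
  kubectl get pods --context=missing-context       # context was not found for specified context
  kubectl get pods --cluster=missing-cluster       # no server found for cluster "missing-cluster"
  kubectl get pods --user=missing-user             # auth info "missing-user" does not exist
  # A kubeconfig whose apiVersion is an unknown value such as v-1 triggers the
  # 'no kind "Config" is registered for version "v-1"' load error seen above
  kubectl get pods --kubeconfig=/tmp/newconfig.yaml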
I0823 01:19:04.167] Recording: run_service_accounts_tests
I0823 01:19:04.167] Running command: run_service_accounts_tests
I0823 01:19:04.201] 
I0823 01:19:04.205] +++ Running case: test-cmd.run_service_accounts_tests 
... skipping 7 lines ...
I0823 01:19:04.628] namespace/test-service-accounts created
I0823 01:19:04.733] core.sh:832: Successful get namespaces/test-service-accounts {{.metadata.name}}: test-service-accounts
I0823 01:19:04.817] serviceaccount/test-service-account created
I0823 01:19:04.921] core.sh:838: Successful get serviceaccount/test-service-account --namespace=test-service-accounts {{.metadata.name}}: test-service-account
I0823 01:19:05.008] serviceaccount "test-service-account" deleted
I0823 01:19:05.098] namespace "test-service-accounts" deleted
W0823 01:19:05.198] E0823 01:19:04.681025   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:05.199] E0823 01:19:04.779355   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:05.199] E0823 01:19:04.890985   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:05.200] E0823 01:19:05.000199   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:05.685] E0823 01:19:05.684133   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:05.781] E0823 01:19:05.780966   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:05.893] E0823 01:19:05.892756   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:06.002] E0823 01:19:06.002001   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:06.686] E0823 01:19:06.685748   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:06.783] E0823 01:19:06.782704   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:06.895] E0823 01:19:06.895100   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:07.005] E0823 01:19:07.004840   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:07.688] E0823 01:19:07.687354   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:07.785] E0823 01:19:07.784562   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:07.897] E0823 01:19:07.896932   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:08.007] E0823 01:19:08.006682   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:08.689] E0823 01:19:08.688984   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:08.787] E0823 01:19:08.786261   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:08.899] E0823 01:19:08.898926   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:09.009] E0823 01:19:09.008374   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:09.691] E0823 01:19:09.690606   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:09.788] E0823 01:19:09.787972   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:09.901] E0823 01:19:09.900688   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:10.010] E0823 01:19:10.009836   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:10.233] +++ exit code: 0
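The service-account case is the simplest of the group; a roughly equivalent imperative sequence, using only names visible in the log, is:

  kubectl create namespace test-service-accounts
  kubectl create serviceaccount test-service-account --namespace=test-service-accounts
  kubectl get serviceaccount/test-service-account --namespace=test-service-accounts -o go-template='{{.metadata.name}}'
  kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
  kubectl delete namespace test-service-accounts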
I0823 01:19:10.285] Recording: run_job_tests
I0823 01:19:10.286] Running command: run_job_tests
I0823 01:19:10.321] 
I0823 01:19:10.325] +++ Running case: test-cmd.run_job_tests 
I0823 01:19:10.330] +++ working dir: /go/src/k8s.io/kubernetes
... skipping 14 lines ...
I0823 01:19:11.137] Labels:                        run=pi
I0823 01:19:11.137] Annotations:                   <none>
I0823 01:19:11.137] Schedule:                      59 23 31 2 *
I0823 01:19:11.137] Concurrency Policy:            Allow
I0823 01:19:11.137] Suspend:                       False
I0823 01:19:11.137] Successful Job History Limit:  3
I0823 01:19:11.138] Failed Job History Limit:      1
I0823 01:19:11.138] Starting Deadline Seconds:     <unset>
I0823 01:19:11.138] Selector:                      <unset>
I0823 01:19:11.138] Parallelism:                   <unset>
I0823 01:19:11.138] Completions:                   <unset>
I0823 01:19:11.138] Pod Template:
I0823 01:19:11.138]   Labels:  run=pi
... skipping 32 lines ...
I0823 01:19:11.694]                 run=pi
I0823 01:19:11.694] Annotations:    cronjob.kubernetes.io/instantiate: manual
I0823 01:19:11.694] Controlled By:  CronJob/pi
I0823 01:19:11.694] Parallelism:    1
I0823 01:19:11.694] Completions:    1
I0823 01:19:11.695] Start Time:     Fri, 23 Aug 2019 01:19:11 +0000
I0823 01:19:11.695] Pods Statuses:  1 Running / 0 Succeeded / 0 Failed
I0823 01:19:11.695] Pod Template:
I0823 01:19:11.695]   Labels:  controller-uid=16293429-150b-4f2b-b234-4cdd73b81b3c
I0823 01:19:11.695]            job-name=test-job
I0823 01:19:11.695]            run=pi
I0823 01:19:11.695]   Containers:
I0823 01:19:11.696]    pi:
... skipping 15 lines ...
I0823 01:19:11.698]   Type    Reason            Age   From            Message
I0823 01:19:11.698]   ----    ------            ----  ----            -------
I0823 01:19:11.698]   Normal  SuccessfulCreate  0s    job-controller  Created pod: test-job-gg4xr
I0823 01:19:11.783] job.batch "test-job" deleted
I0823 01:19:11.872] cronjob.batch "pi" deleted
I0823 01:19:11.961] namespace "test-jobs" deleted
W0823 01:19:12.062] E0823 01:19:10.691775   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.062] E0823 01:19:10.789353   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.062] kubectl run --generator=cronjob/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0823 01:19:12.063] E0823 01:19:10.902072   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.063] E0823 01:19:11.011416   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.063] I0823 01:19:11.411092   52841 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"test-jobs", Name:"test-job", UID:"16293429-150b-4f2b-b234-4cdd73b81b3c", APIVersion:"batch/v1", ResourceVersion:"1356", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-gg4xr
W0823 01:19:12.063] E0823 01:19:11.699387   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.064] E0823 01:19:11.790349   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.064] E0823 01:19:11.903469   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.064] E0823 01:19:12.012731   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.701] E0823 01:19:12.700929   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.792] E0823 01:19:12.791668   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:12.905] E0823 01:19:12.904887   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:13.014] E0823 01:19:13.014015   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:13.703] E0823 01:19:13.702370   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:13.793] E0823 01:19:13.793058   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:13.906] E0823 01:19:13.906287   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:14.017] E0823 01:19:14.016665   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:14.704] E0823 01:19:14.704145   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:14.796] E0823 01:19:14.796168   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:14.908] E0823 01:19:14.907785   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:15.019] E0823 01:19:15.019063   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:15.706] E0823 01:19:15.706076   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:15.798] E0823 01:19:15.797882   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:15.910] E0823 01:19:15.909242   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:16.020] E0823 01:19:16.020231   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:16.708] E0823 01:19:16.707475   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:16.800] E0823 01:19:16.799592   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:16.911] E0823 01:19:16.910942   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:17.021] E0823 01:19:17.021024   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:17.122] +++ exit code: 0
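The job case creates a CronJob through the deprecated kubectl run generator (hence the DEPRECATED warnings above) and then instantiates a Job from it by hand, which is what adds the cronjob.kubernetes.io/instantiate: manual annotation in the describe output. A sketch under those assumptions; the image and the pi command are illustrative placeholders, only the names, namespace, and schedule come from the log:

  kubectl run pi --namespace=test-jobs --generator=cronjob/v1beta1 \
      --schedule="59 23 31 2 *" --restart=OnFailure --image=perl \
      -- perl -Mbignum=bpi -wle 'print bpi(20)'
  # Manually instantiate a Job from the CronJob, then inspect and clean up
  kubectl create job test-job --namespace=test-jobs --from=cronjob/pi
  kubectl describe job test-job --namespace=test-jobs
  kubectl delete job test-job --namespace=test-jobs
  kubectl delete cronjob pi --namespace=test-jobs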
I0823 01:19:17.127] Recording: run_create_job_tests
I0823 01:19:17.128] Running command: run_create_job_tests
I0823 01:19:17.151] 
I0823 01:19:17.154] +++ Running case: test-cmd.run_create_job_tests 
I0823 01:19:17.157] +++ working dir: /go/src/k8s.io/kubernetes
... skipping 29 lines ...
I0823 01:19:18.706] core.sh:1415: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:19:18.869] podtemplate/nginx created
I0823 01:19:18.973] core.sh:1419: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx:
I0823 01:19:19.053] NAME    CONTAINERS   IMAGES   POD LABELS
I0823 01:19:19.053] nginx   nginx        nginx    name=nginx
W0823 01:19:19.154] I0823 01:19:17.683654   52841 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1566523157-24580", Name:"test-job-pi", UID:"20a79434-85f5-47aa-9851-0508bfa6d162", APIVersion:"batch/v1", ResourceVersion:"1381", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-pi-gnhqh
W0823 01:19:19.154] E0823 01:19:17.708827   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:19.154] E0823 01:19:17.800945   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:19.155] E0823 01:19:17.912421   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:19.155] kubectl run --generator=cronjob/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0823 01:19:19.155] E0823 01:19:18.022454   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:19.155] I0823 01:19:18.051606   52841 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1566523157-24580", Name:"my-pi", UID:"7b0c006a-1bb5-4a70-a0d6-646281a8f6ab", APIVersion:"batch/v1", ResourceVersion:"1389", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: my-pi-4h8wg
W0823 01:19:19.156] E0823 01:19:18.711359   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:19.156] E0823 01:19:18.802260   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:19.156] I0823 01:19:18.866382   49405 controller.go:606] quota admission added evaluator for: podtemplates
W0823 01:19:19.156] E0823 01:19:18.913723   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:19.156] E0823 01:19:19.023879   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:19.257] core.sh:1427: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx:
I0823 01:19:19.333] podtemplate "nginx" deleted
I0823 01:19:19.465] core.sh:1431: Successful get podtemplate {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:19:19.479] +++ exit code: 0
I0823 01:19:19.516] Recording: run_service_tests
I0823 01:19:19.516] Running command: run_service_tests
... skipping 65 lines ...
I0823 01:19:20.421] Port:              <unset>  6379/TCP
I0823 01:19:20.421] TargetPort:        6379/TCP
I0823 01:19:20.421] Endpoints:         <none>
I0823 01:19:20.421] Session Affinity:  None
I0823 01:19:20.421] Events:            <none>
I0823 01:19:20.421] 
W0823 01:19:20.523] E0823 01:19:19.712499   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:20.523] E0823 01:19:19.803719   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:20.523] E0823 01:19:19.915237   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:20.524] E0823 01:19:20.025865   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:20.624] Successful describe services:
I0823 01:19:20.624] Name:              kubernetes
I0823 01:19:20.624] Namespace:         default
I0823 01:19:20.625] Labels:            component=apiserver
I0823 01:19:20.625]                    provider=kubernetes
I0823 01:19:20.625] Annotations:       <none>
... skipping 232 lines ...
I0823 01:19:21.602]   selector:
I0823 01:19:21.602]     role: padawan
I0823 01:19:21.602]   sessionAffinity: None
I0823 01:19:21.602]   type: ClusterIP
I0823 01:19:21.602] status:
I0823 01:19:21.602]   loadBalancer: {}
W0823 01:19:21.703] E0823 01:19:20.714301   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:21.703] E0823 01:19:20.805317   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:21.703] E0823 01:19:20.916683   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:21.704] E0823 01:19:21.027654   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:21.704] error: you must specify resources by --filename when --local is set.
W0823 01:19:21.704] Example resource specifications include:
W0823 01:19:21.704]    '-f rsrc.yaml'
W0823 01:19:21.704]    '--filename=rsrc.json'
W0823 01:19:21.716] E0823 01:19:21.715446   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:21.807] E0823 01:19:21.806506   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:21.907] core.sh:898: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend:
I0823 01:19:21.956] core.sh:905: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
I0823 01:19:22.043] service "redis-master" deleted
I0823 01:19:22.145] core.sh:912: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0823 01:19:22.237] core.sh:916: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0823 01:19:22.413] service/redis-master created
I0823 01:19:22.518] core.sh:920: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
I0823 01:19:22.615] core.sh:924: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
I0823 01:19:22.771] service/service-v1-test created
W0823 01:19:22.871] E0823 01:19:21.918171   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:22.872] E0823 01:19:22.029324   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:22.872] E0823 01:19:22.716722   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:22.872] E0823 01:19:22.807811   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:22.920] E0823 01:19:22.920237   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:23.021] core.sh:945: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test:
I0823 01:19:23.047] service/service-v1-test replaced
I0823 01:19:23.151] core.sh:952: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test:
I0823 01:19:23.236] service "redis-master" deleted
W0823 01:19:23.336] E0823 01:19:23.030984   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:23.437] service "service-v1-test" deleted
I0823 01:19:23.466] core.sh:960: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0823 01:19:23.564] core.sh:964: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0823 01:19:23.726] service/redis-master created
W0823 01:19:23.827] E0823 01:19:23.719000   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:23.828] E0823 01:19:23.809493   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:23.922] E0823 01:19:23.921496   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:24.022] service/redis-slave created
I0823 01:19:24.023] core.sh:969: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:redis-slave:
I0823 01:19:24.107] Successful
I0823 01:19:24.107] message:NAME           RSRC
I0823 01:19:24.107] kubernetes     145
I0823 01:19:24.107] redis-master   1426
I0823 01:19:24.107] redis-slave    1429
I0823 01:19:24.107] has:redis-master
I0823 01:19:24.204] core.sh:979: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:redis-slave:
I0823 01:19:24.286] service "redis-master" deleted
I0823 01:19:24.293] service "redis-slave" deleted
W0823 01:19:24.394] E0823 01:19:24.032216   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:24.494] core.sh:986: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0823 01:19:24.519] core.sh:990: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0823 01:19:24.600] service/beep-boop created
I0823 01:19:24.705] core.sh:994: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: beep-boop:kubernetes:
I0823 01:19:24.815] core.sh:998: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: beep-boop:kubernetes:
I0823 01:19:24.937] service "beep-boop" deleted
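One way to reproduce the beep-boop service exercised just above is the imperative ClusterIP creator; whether core.sh uses this exact path is not visible in the log, and the port mapping below is a placeholder, only the service name is taken from the output:

  # ClusterIP service created imperatively, then listed with the same go-template assertion style used throughout core.sh
  kubectl create service clusterip beep-boop --tcp=8080:8081
  kubectl get services -o go-template='{{range.items}}{{.metadata.name}}:{{end}}'
  kubectl delete service beep-boop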
W0823 01:19:25.038] E0823 01:19:24.720959   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:25.038] E0823 01:19:24.813377   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:25.038] E0823 01:19:24.923496   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:25.039] E0823 01:19:25.034139   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:25.139] core.sh:1005: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0823 01:19:25.170] core.sh:1009: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:19:25.269] service/testmetadata created
I0823 01:19:25.270] deployment.apps/testmetadata created
I0823 01:19:25.373] core.sh:1013: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: testmetadata:
I0823 01:19:25.474] core.sh:1014: Successful get service testmetadata {{.metadata.annotations}}: map[zone-context:home]
... skipping 16 lines ...
I0823 01:19:26.281] apps.sh:30: Successful get daemonsets {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:19:26.538] daemonset.apps/bind created
W0823 01:19:26.639] kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0823 01:19:26.640] I0823 01:19:25.251113   52841 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"testmetadata", UID:"f860cdf3-d103-49e6-b9e8-c27d3331beff", APIVersion:"apps/v1", ResourceVersion:"1443", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set testmetadata-6cdd84c77d to 2
W0823 01:19:26.640] I0823 01:19:25.255743   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"testmetadata-6cdd84c77d", UID:"1df23db3-766b-45de-997f-c5a628b4de21", APIVersion:"apps/v1", ResourceVersion:"1444", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: testmetadata-6cdd84c77d-5lrp9
W0823 01:19:26.641] I0823 01:19:25.260031   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"testmetadata-6cdd84c77d", UID:"1df23db3-766b-45de-997f-c5a628b4de21", APIVersion:"apps/v1", ResourceVersion:"1444", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: testmetadata-6cdd84c77d-htktg
W0823 01:19:26.641] E0823 01:19:25.722380   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:26.642] E0823 01:19:25.814795   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:26.642] E0823 01:19:25.925085   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:26.643] E0823 01:19:26.035825   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:26.643] I0823 01:19:26.535614   49405 controller.go:606] quota admission added evaluator for: daemonsets.apps
W0823 01:19:26.644] I0823 01:19:26.554613   49405 controller.go:606] quota admission added evaluator for: controllerrevisions.apps
W0823 01:19:26.724] E0823 01:19:26.724074   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:26.816] E0823 01:19:26.815912   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:26.917] apps.sh:34: Successful get daemonsets bind {{.metadata.generation}}: 1
I0823 01:19:26.917] daemonset.apps/bind configured
I0823 01:19:26.936] apps.sh:37: Successful get daemonsets bind {{.metadata.generation}}: 1
I0823 01:19:27.033] daemonset.apps/bind image updated
W0823 01:19:27.134] E0823 01:19:26.926178   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:27.135] E0823 01:19:27.037100   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:27.235] apps.sh:40: Successful get daemonsets bind {{.metadata.generation}}: 2
I0823 01:19:27.236] daemonset.apps/bind env updated
I0823 01:19:27.345] apps.sh:42: Successful get daemonsets bind {{.metadata.generation}}: 3
I0823 01:19:27.436] daemonset.apps/bind resource requirements updated
I0823 01:19:27.532] apps.sh:44: Successful get daemonsets bind {{.metadata.generation}}: 4
I0823 01:19:27.628] daemonset.apps/bind restarted
... skipping 9 lines ...
I0823 01:19:27.913] +++ [0823 01:19:27] Creating namespace namespace-1566523167-28415
I0823 01:19:27.995] namespace/namespace-1566523167-28415 created
I0823 01:19:28.091] Context "test" modified.
I0823 01:19:28.100] +++ [0823 01:19:28] Testing kubectl(v1:daemonsets, v1:controllerrevisions)
I0823 01:19:28.190] apps.sh:66: Successful get daemonsets {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:19:28.379] daemonset.apps/bind created
W0823 01:19:28.480] E0823 01:19:27.725454   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:28.480] E0823 01:19:27.816938   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:28.481] E0823 01:19:27.927579   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:28.482] E0823 01:19:28.038429   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:28.583] apps.sh:70: Successful get controllerrevisions {{range.items}}{{.metadata.annotations}}:{{end}}: map[deprecated.daemonset.template.generation:1 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1566523167-28415"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s.gcr.io/pause:2.0","name":"kubernetes-pause"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}}
I0823 01:19:28.584]  kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true]:
I0823 01:19:28.598] daemonset.apps/bind skipped rollback (current template already matches revision 1)
I0823 01:19:28.708] apps.sh:73: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0:
I0823 01:19:28.814] apps.sh:74: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
I0823 01:19:28.999] daemonset.apps/bind configured
W0823 01:19:29.100] E0823 01:19:28.727107   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:29.101] E0823 01:19:28.819248   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:29.101] E0823 01:19:28.929291   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:29.102] E0823 01:19:29.040030   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:29.202] apps.sh:77: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest:
I0823 01:19:29.212] apps.sh:78: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
I0823 01:19:29.319] apps.sh:79: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2
I0823 01:19:29.430] (Bapps.sh:80: Successful get controllerrevisions {{range.items}}{{.metadata.annotations}}:{{end}}: map[deprecated.daemonset.template.generation:2 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1566523167-28415"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s.gcr.io/pause:latest","name":"kubernetes-pause"},{"image":"k8s.gcr.io/nginx:test-cmd","name":"app"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}}
I0823 01:19:29.431]  kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true]:map[deprecated.daemonset.template.generation:1 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1566523167-28415"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s.gcr.io/pause:2.0","name":"kubernetes-pause"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}}
I0823 01:19:29.432]  kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true]:
... skipping 12 lines ...
I0823 01:19:29.780] apps.sh:84: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
I0823 01:19:29.885] apps.sh:85: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2
I0823 01:19:29.997] daemonset.apps/bind rolled back
I0823 01:19:30.102] apps.sh:88: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0:
I0823 01:19:30.207] apps.sh:89: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
I0823 01:19:30.329] Successful
I0823 01:19:30.330] message:error: unable to find specified revision 1000000 in history
I0823 01:19:30.330] has:unable to find specified revision
I0823 01:19:30.432] apps.sh:93: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0:
I0823 01:19:30.535] apps.sh:94: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
I0823 01:19:30.642] daemonset.apps/bind rolled back
W0823 01:19:30.743] E0823 01:19:29.728670   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:30.744] E0823 01:19:29.820992   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:30.744] E0823 01:19:29.931422   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:30.748] E0823 01:19:30.016212   52841 daemon_controller.go:302] namespace-1566523167-28415/bind failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"bind", GenerateName:"", Namespace:"namespace-1566523167-28415", SelfLink:"/apis/apps/v1/namespaces/namespace-1566523167-28415/daemonsets/bind", UID:"44f5981d-dddd-4756-a45d-e7e91b82dd79", ResourceVersion:"1509", Generation:3, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63702119968, loc:(*time.Location)(0x73d0220)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"3", "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"DaemonSet\",\"metadata\":{\"annotations\":{\"kubernetes.io/change-cause\":\"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true\"},\"labels\":{\"service\":\"bind\"},\"name\":\"bind\",\"namespace\":\"namespace-1566523167-28415\"},\"spec\":{\"selector\":{\"matchLabels\":{\"service\":\"bind\"}},\"template\":{\"metadata\":{\"labels\":{\"service\":\"bind\"}},\"spec\":{\"affinity\":{\"podAntiAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":[{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"service\",\"operator\":\"In\",\"values\":[\"bind\"]}]},\"namespaces\":[],\"topologyKey\":\"kubernetes.io/hostname\"}]}},\"containers\":[{\"image\":\"k8s.gcr.io/pause:latest\",\"name\":\"kubernetes-pause\"},{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"app\"}]}},\"updateStrategy\":{\"rollingUpdate\":{\"maxUnavailable\":\"10%\"},\"type\":\"RollingUpdate\"}}}\n", "kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kube-controller-manager", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001d7ea80), Fields:(*v1.Fields)(0xc001d7eac0)}, v1.ManagedFieldsEntry{Manager:"kubectl", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001d7eb20), Fields:(*v1.Fields)(0xc001d7eb40)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc001d7eb60), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume(nil), InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kubernetes-pause", Image:"k8s.gcr.io/pause:2.0", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), 
LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc001620d48), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"", DeprecatedServiceAccount:"", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc00218e180), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(0xc001d7eb80), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration(nil), HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc001ed4098)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc001620d9c)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:2, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "bind": the object has been modified; please apply your changes to the latest version and try again
W0823 01:19:30.750] E0823 01:19:30.041851   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:30.754] E0823 01:19:30.662705   52841 daemon_controller.go:302] namespace-1566523167-28415/bind failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"bind", GenerateName:"", Namespace:"namespace-1566523167-28415", SelfLink:"/apis/apps/v1/namespaces/namespace-1566523167-28415/daemonsets/bind", UID:"44f5981d-dddd-4756-a45d-e7e91b82dd79", ResourceVersion:"1513", Generation:4, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63702119968, loc:(*time.Location)(0x73d0220)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"4", "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"DaemonSet\",\"metadata\":{\"annotations\":{\"kubernetes.io/change-cause\":\"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true\"},\"labels\":{\"service\":\"bind\"},\"name\":\"bind\",\"namespace\":\"namespace-1566523167-28415\"},\"spec\":{\"selector\":{\"matchLabels\":{\"service\":\"bind\"}},\"template\":{\"metadata\":{\"labels\":{\"service\":\"bind\"}},\"spec\":{\"affinity\":{\"podAntiAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":[{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"service\",\"operator\":\"In\",\"values\":[\"bind\"]}]},\"namespaces\":[],\"topologyKey\":\"kubernetes.io/hostname\"}]}},\"containers\":[{\"image\":\"k8s.gcr.io/pause:latest\",\"name\":\"kubernetes-pause\"},{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"app\"}]}},\"updateStrategy\":{\"rollingUpdate\":{\"maxUnavailable\":\"10%\"},\"type\":\"RollingUpdate\"}}}\n", "kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kube-controller-manager", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001d1d4e0), Fields:(*v1.Fields)(0xc001d1d500)}, v1.ManagedFieldsEntry{Manager:"kubectl", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc001d1d5a0), Fields:(*v1.Fields)(0xc001d1d600)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc001d1d620), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume(nil), InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kubernetes-pause", Image:"k8s.gcr.io/pause:latest", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), 
LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"app", Image:"k8s.gcr.io/nginx:test-cmd", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc000ad6088), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"", DeprecatedServiceAccount:"", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0020bc9c0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(0xc001d1d660), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration(nil), HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc000742dc8)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc000ad611c)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:3, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "bind": the object has been modified; please apply your changes to the latest version and try again
W0823 01:19:30.754] E0823 01:19:30.730096   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:30.823] E0823 01:19:30.822594   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:30.924] apps.sh:97: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest:
I0823 01:19:30.924] apps.sh:98: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
I0823 01:19:30.976] apps.sh:99: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2
I0823 01:19:31.061] daemonset.apps "bind" deleted
I0823 01:19:31.092] +++ exit code: 0
I0823 01:19:31.156] Recording: run_rc_tests
... skipping 6 lines ...
I0823 01:19:31.321] namespace/namespace-1566523171-3183 created
I0823 01:19:31.397] Context "test" modified.
I0823 01:19:31.407] +++ [0823 01:19:31] Testing kubectl(v1:replicationcontrollers)
I0823 01:19:31.507] core.sh:1046: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:19:31.695] replicationcontroller/frontend created
I0823 01:19:31.795] replicationcontroller "frontend" deleted
W0823 01:19:31.896] E0823 01:19:30.933267   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:31.897] E0823 01:19:31.043551   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:31.897] I0823 01:19:31.703142   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"7490adb1-4912-473b-ad94-dd083b9cb5e1", APIVersion:"v1", ResourceVersion:"1521", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-fk24t
W0823 01:19:31.898] I0823 01:19:31.707496   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"7490adb1-4912-473b-ad94-dd083b9cb5e1", APIVersion:"v1", ResourceVersion:"1521", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-5q6lk
W0823 01:19:31.898] I0823 01:19:31.707907   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"7490adb1-4912-473b-ad94-dd083b9cb5e1", APIVersion:"v1", ResourceVersion:"1521", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-268dl
W0823 01:19:31.898] E0823 01:19:31.731650   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:31.899] E0823 01:19:31.824200   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:31.935] E0823 01:19:31.934890   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:32.036] core.sh:1051: Successful get pods -l "name=frontend" {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:19:32.036] core.sh:1055: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0823 01:19:32.200] replicationcontroller/frontend created
W0823 01:19:32.301] E0823 01:19:32.045963   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:32.302] I0823 01:19:32.204987   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"3fab8d97-c106-47dc-8ba8-36dd6db50f8b", APIVersion:"v1", ResourceVersion:"1537", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-855xm
W0823 01:19:32.302] I0823 01:19:32.209791   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"3fab8d97-c106-47dc-8ba8-36dd6db50f8b", APIVersion:"v1", ResourceVersion:"1537", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-bwf2q
W0823 01:19:32.303] I0823 01:19:32.209853   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"3fab8d97-c106-47dc-8ba8-36dd6db50f8b", APIVersion:"v1", ResourceVersion:"1537", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-jtszh
I0823 01:19:32.403] core.sh:1059: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: frontend:
I0823 01:19:32.510] core.sh:1061: Successful describe rc frontend:
I0823 01:19:32.510] Name:         frontend
I0823 01:19:32.510] Namespace:    namespace-1566523171-3183
I0823 01:19:32.510] Selector:     app=guestbook,tier=frontend
I0823 01:19:32.510] Labels:       app=guestbook
I0823 01:19:32.510]               tier=frontend
I0823 01:19:32.511] Annotations:  <none>
I0823 01:19:32.511] Replicas:     3 current / 3 desired
I0823 01:19:32.511] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0823 01:19:32.511] Pod Template:
I0823 01:19:32.511]   Labels:  app=guestbook
I0823 01:19:32.511]            tier=frontend
I0823 01:19:32.511]   Containers:
I0823 01:19:32.511]    php-redis:
I0823 01:19:32.512]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
I0823 01:19:32.639] Namespace:    namespace-1566523171-3183
I0823 01:19:32.639] Selector:     app=guestbook,tier=frontend
I0823 01:19:32.639] Labels:       app=guestbook
I0823 01:19:32.639]               tier=frontend
I0823 01:19:32.640] Annotations:  <none>
I0823 01:19:32.640] Replicas:     3 current / 3 desired
I0823 01:19:32.640] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0823 01:19:32.640] Pod Template:
I0823 01:19:32.640]   Labels:  app=guestbook
I0823 01:19:32.640]            tier=frontend
I0823 01:19:32.640]   Containers:
I0823 01:19:32.641]    php-redis:
I0823 01:19:32.641]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 10 lines ...
I0823 01:19:32.642]   Type    Reason            Age   From                    Message
I0823 01:19:32.642]   ----    ------            ----  ----                    -------
I0823 01:19:32.642]   Normal  SuccessfulCreate  0s    replication-controller  Created pod: frontend-855xm
I0823 01:19:32.643]   Normal  SuccessfulCreate  0s    replication-controller  Created pod: frontend-bwf2q
I0823 01:19:32.643]   Normal  SuccessfulCreate  0s    replication-controller  Created pod: frontend-jtszh
I0823 01:19:32.643] 
W0823 01:19:32.743] E0823 01:19:32.734212   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:32.827] E0823 01:19:32.826592   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:32.927] core.sh:1065: Successful describe
I0823 01:19:32.928] Name:         frontend
I0823 01:19:32.928] Namespace:    namespace-1566523171-3183
I0823 01:19:32.928] Selector:     app=guestbook,tier=frontend
I0823 01:19:32.928] Labels:       app=guestbook
I0823 01:19:32.928]               tier=frontend
I0823 01:19:32.928] Annotations:  <none>
I0823 01:19:32.928] Replicas:     3 current / 3 desired
I0823 01:19:32.928] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0823 01:19:32.929] Pod Template:
I0823 01:19:32.929]   Labels:  app=guestbook
I0823 01:19:32.929]            tier=frontend
I0823 01:19:32.929]   Containers:
I0823 01:19:32.929]    php-redis:
I0823 01:19:32.929]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 12 lines ...
I0823 01:19:32.930] Namespace:    namespace-1566523171-3183
I0823 01:19:32.930] Selector:     app=guestbook,tier=frontend
I0823 01:19:32.930] Labels:       app=guestbook
I0823 01:19:32.930]               tier=frontend
I0823 01:19:32.930] Annotations:  <none>
I0823 01:19:32.930] Replicas:     3 current / 3 desired
I0823 01:19:32.931] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0823 01:19:32.931] Pod Template:
I0823 01:19:32.931]   Labels:  app=guestbook
I0823 01:19:32.931]            tier=frontend
I0823 01:19:32.931]   Containers:
I0823 01:19:32.931]    php-redis:
I0823 01:19:32.931]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 10 lines ...
I0823 01:19:32.932]   Type    Reason            Age   From                    Message
I0823 01:19:32.932]   ----    ------            ----  ----                    -------
I0823 01:19:32.932]   Normal  SuccessfulCreate  0s    replication-controller  Created pod: frontend-855xm
I0823 01:19:32.933]   Normal  SuccessfulCreate  0s    replication-controller  Created pod: frontend-bwf2q
I0823 01:19:32.933]   Normal  SuccessfulCreate  0s    replication-controller  Created pod: frontend-jtszh
I0823 01:19:32.933] 
W0823 01:19:33.034] E0823 01:19:32.937165   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:33.048] E0823 01:19:33.047501   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:33.149] Successful describe rc:
I0823 01:19:33.149] Name:         frontend
I0823 01:19:33.149] Namespace:    namespace-1566523171-3183
I0823 01:19:33.149] Selector:     app=guestbook,tier=frontend
I0823 01:19:33.149] Labels:       app=guestbook
I0823 01:19:33.149]               tier=frontend
I0823 01:19:33.149] Annotations:  <none>
I0823 01:19:33.149] Replicas:     3 current / 3 desired
I0823 01:19:33.150] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0823 01:19:33.150] Pod Template:
I0823 01:19:33.150]   Labels:  app=guestbook
I0823 01:19:33.150]            tier=frontend
I0823 01:19:33.150]   Containers:
I0823 01:19:33.150]    php-redis:
I0823 01:19:33.150]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
I0823 01:19:33.229] Namespace:    namespace-1566523171-3183
I0823 01:19:33.229] Selector:     app=guestbook,tier=frontend
I0823 01:19:33.229] Labels:       app=guestbook
I0823 01:19:33.229]               tier=frontend
I0823 01:19:33.229] Annotations:  <none>
I0823 01:19:33.229] Replicas:     3 current / 3 desired
I0823 01:19:33.229] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0823 01:19:33.229] Pod Template:
I0823 01:19:33.229]   Labels:  app=guestbook
I0823 01:19:33.229]            tier=frontend
I0823 01:19:33.229]   Containers:
I0823 01:19:33.229]    php-redis:
I0823 01:19:33.230]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
I0823 01:19:33.388] Namespace:    namespace-1566523171-3183
I0823 01:19:33.388] Selector:     app=guestbook,tier=frontend
I0823 01:19:33.388] Labels:       app=guestbook
I0823 01:19:33.388]               tier=frontend
I0823 01:19:33.388] Annotations:  <none>
I0823 01:19:33.388] Replicas:     3 current / 3 desired
I0823 01:19:33.388] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0823 01:19:33.388] Pod Template:
I0823 01:19:33.389]   Labels:  app=guestbook
I0823 01:19:33.389]            tier=frontend
I0823 01:19:33.389]   Containers:
I0823 01:19:33.389]    php-redis:
I0823 01:19:33.389]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 11 lines ...
I0823 01:19:33.509] Namespace:    namespace-1566523171-3183
I0823 01:19:33.509] Selector:     app=guestbook,tier=frontend
I0823 01:19:33.509] Labels:       app=guestbook
I0823 01:19:33.509]               tier=frontend
I0823 01:19:33.509] Annotations:  <none>
I0823 01:19:33.509] Replicas:     3 current / 3 desired
I0823 01:19:33.509] Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
I0823 01:19:33.510] Pod Template:
I0823 01:19:33.510]   Labels:  app=guestbook
I0823 01:19:33.510]            tier=frontend
I0823 01:19:33.510]   Containers:
I0823 01:19:33.510]    php-redis:
I0823 01:19:33.510]     Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
I0823 01:19:33.802] core.sh:1083: Successful get rc frontend {{.spec.replicas}}: 2
I0823 01:19:33.896] core.sh:1087: Successful get rc frontend {{.spec.replicas}}: 2
I0823 01:19:34.077] core.sh:1091: Successful get rc frontend {{.spec.replicas}}: 2
I0823 01:19:34.167] core.sh:1095: Successful get rc frontend {{.spec.replicas}}: 2
I0823 01:19:34.254] replicationcontroller/frontend scaled
W0823 01:19:34.354] I0823 01:19:33.708132   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"3fab8d97-c106-47dc-8ba8-36dd6db50f8b", APIVersion:"v1", ResourceVersion:"1547", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: frontend-jtszh
W0823 01:19:34.355] E0823 01:19:33.735997   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:34.355] E0823 01:19:33.828193   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:34.356] E0823 01:19:33.939242   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:34.356] error: Expected replicas to be 3, was 2
W0823 01:19:34.356] E0823 01:19:34.048915   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:34.357] I0823 01:19:34.257967   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"3fab8d97-c106-47dc-8ba8-36dd6db50f8b", APIVersion:"v1", ResourceVersion:"1553", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-7lflp
I0823 01:19:34.457] core.sh:1099: Successful get rc frontend {{.spec.replicas}}: 3
I0823 01:19:34.457] core.sh:1103: Successful get rc frontend {{.spec.replicas}}: 3
I0823 01:19:34.534] replicationcontroller/frontend scaled
I0823 01:19:34.635] core.sh:1107: Successful get rc frontend {{.spec.replicas}}: 2
I0823 01:19:34.717] replicationcontroller "frontend" deleted
W0823 01:19:34.818] I0823 01:19:34.539067   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"3fab8d97-c106-47dc-8ba8-36dd6db50f8b", APIVersion:"v1", ResourceVersion:"1558", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: frontend-7lflp
W0823 01:19:34.818] E0823 01:19:34.737659   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:34.829] E0823 01:19:34.829332   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:34.885] I0823 01:19:34.884766   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"redis-master", UID:"cfbe7547-4b0a-4122-b176-a2e02a03265a", APIVersion:"v1", ResourceVersion:"1570", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: redis-master-9mqdb
W0823 01:19:34.941] E0823 01:19:34.940726   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:35.041] replicationcontroller/redis-master created
I0823 01:19:35.070] replicationcontroller/redis-slave created
W0823 01:19:35.171] E0823 01:19:35.050906   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:35.172] I0823 01:19:35.075672   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"redis-slave", UID:"f24733da-568c-4dca-92fb-563393a4bed1", APIVersion:"v1", ResourceVersion:"1575", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: redis-slave-m7plh
W0823 01:19:35.172] I0823 01:19:35.079731   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"redis-slave", UID:"f24733da-568c-4dca-92fb-563393a4bed1", APIVersion:"v1", ResourceVersion:"1575", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: redis-slave-9kbn6
W0823 01:19:35.207] I0823 01:19:35.206695   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"redis-master", UID:"cfbe7547-4b0a-4122-b176-a2e02a03265a", APIVersion:"v1", ResourceVersion:"1583", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: redis-master-9xdfk
W0823 01:19:35.211] I0823 01:19:35.210514   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"redis-master", UID:"cfbe7547-4b0a-4122-b176-a2e02a03265a", APIVersion:"v1", ResourceVersion:"1583", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: redis-master-5689k
W0823 01:19:35.211] I0823 01:19:35.210919   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"redis-master", UID:"cfbe7547-4b0a-4122-b176-a2e02a03265a", APIVersion:"v1", ResourceVersion:"1583", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: redis-master-hp48x
W0823 01:19:35.217] I0823 01:19:35.217114   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"redis-slave", UID:"f24733da-568c-4dca-92fb-563393a4bed1", APIVersion:"v1", ResourceVersion:"1585", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: redis-slave-jptfd
W0823 01:19:35.220] I0823 01:19:35.219990   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"redis-slave", UID:"f24733da-568c-4dca-92fb-563393a4bed1", APIVersion:"v1", ResourceVersion:"1585", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: redis-slave-sqwxk
I0823 01:19:35.321] replicationcontroller/redis-master scaled
I0823 01:19:35.321] replicationcontroller/redis-slave scaled
I0823 01:19:35.321] core.sh:1117: Successful get rc redis-master {{.spec.replicas}}: 4
I0823 01:19:35.426] core.sh:1118: Successful get rc redis-slave {{.spec.replicas}}: 4
I0823 01:19:35.547] replicationcontroller "redis-master" deleted
I0823 01:19:35.553] replicationcontroller "redis-slave" deleted
W0823 01:19:35.739] E0823 01:19:35.739340   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:35.752] I0823 01:19:35.750517   52841 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment", UID:"3cd60c77-f025-42df-acfc-7b29ef84e11d", APIVersion:"apps/v1", ResourceVersion:"1617", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-deployment-66987bfc58 to 3
W0823 01:19:35.755] I0823 01:19:35.754608   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment-66987bfc58", UID:"901773c1-2277-4ada-b3e5-cb324ca5cae7", APIVersion:"apps/v1", ResourceVersion:"1618", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-deployment-66987bfc58-6jz2c
W0823 01:19:35.760] I0823 01:19:35.759547   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment-66987bfc58", UID:"901773c1-2277-4ada-b3e5-cb324ca5cae7", APIVersion:"apps/v1", ResourceVersion:"1618", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-deployment-66987bfc58-8sm2q
W0823 01:19:35.762] I0823 01:19:35.762156   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment-66987bfc58", UID:"901773c1-2277-4ada-b3e5-cb324ca5cae7", APIVersion:"apps/v1", ResourceVersion:"1618", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-deployment-66987bfc58-nbbqh
W0823 01:19:35.831] E0823 01:19:35.830703   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:35.862] I0823 01:19:35.861455   52841 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment", UID:"3cd60c77-f025-42df-acfc-7b29ef84e11d", APIVersion:"apps/v1", ResourceVersion:"1631", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set nginx-deployment-66987bfc58 to 1
W0823 01:19:35.869] I0823 01:19:35.868473   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment-66987bfc58", UID:"901773c1-2277-4ada-b3e5-cb324ca5cae7", APIVersion:"apps/v1", ResourceVersion:"1632", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: nginx-deployment-66987bfc58-6jz2c
W0823 01:19:35.870] I0823 01:19:35.868987   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment-66987bfc58", UID:"901773c1-2277-4ada-b3e5-cb324ca5cae7", APIVersion:"apps/v1", ResourceVersion:"1632", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: nginx-deployment-66987bfc58-8sm2q
W0823 01:19:35.943] E0823 01:19:35.942270   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:36.043] deployment.apps/nginx-deployment created
I0823 01:19:36.043] deployment.apps/nginx-deployment scaled
I0823 01:19:36.044] core.sh:1127: Successful get deployment nginx-deployment {{.spec.replicas}}: 1
I0823 01:19:36.061] deployment.apps "nginx-deployment" deleted
W0823 01:19:36.161] E0823 01:19:36.052771   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:36.262] Successful
I0823 01:19:36.262] message:service/expose-test-deployment exposed
I0823 01:19:36.262] has:service/expose-test-deployment exposed
I0823 01:19:36.287] service "expose-test-deployment" deleted
I0823 01:19:36.405] Successful
I0823 01:19:36.406] message:error: couldn't retrieve selectors via --selector flag or introspection: invalid deployment: no selectors, therefore cannot be exposed
I0823 01:19:36.406] See 'kubectl expose -h' for help and examples
I0823 01:19:36.406] has:invalid deployment: no selectors
I0823 01:19:36.600] deployment.apps/nginx-deployment created
W0823 01:19:36.701] I0823 01:19:36.604997   52841 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment", UID:"ab64305f-7cc1-495b-b03e-312b7c2b0283", APIVersion:"apps/v1", ResourceVersion:"1656", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-deployment-66987bfc58 to 3
W0823 01:19:36.702] I0823 01:19:36.611016   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment-66987bfc58", UID:"79a2be47-efe2-4230-92f8-32b953cf177d", APIVersion:"apps/v1", ResourceVersion:"1657", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-deployment-66987bfc58-pzpgt
W0823 01:19:36.702] I0823 01:19:36.615009   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment-66987bfc58", UID:"79a2be47-efe2-4230-92f8-32b953cf177d", APIVersion:"apps/v1", ResourceVersion:"1657", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-deployment-66987bfc58-xmjlr
W0823 01:19:36.703] I0823 01:19:36.617133   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1566523171-3183", Name:"nginx-deployment-66987bfc58", UID:"79a2be47-efe2-4230-92f8-32b953cf177d", APIVersion:"apps/v1", ResourceVersion:"1657", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-deployment-66987bfc58-5kzf7
W0823 01:19:36.741] E0823 01:19:36.740883   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:36.832] E0823 01:19:36.831926   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:36.933] core.sh:1146: Successful get deployment nginx-deployment {{.spec.replicas}}: 3
I0823 01:19:36.933] service/nginx-deployment exposed
I0823 01:19:36.951] core.sh:1150: Successful get service nginx-deployment {{(index .spec.ports 0).port}}: 80
I0823 01:19:37.046] deployment.apps "nginx-deployment" deleted
I0823 01:19:37.054] service "nginx-deployment" deleted
W0823 01:19:37.155] E0823 01:19:36.944322   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0823 01:19:37.156] E0823 01:19:37.054254   52841 reflector.go:123] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0823 01:19:37.272] replicationcontroller/frontend created
W0823 01:19:37.373] I0823 01:19:37.278628   52841 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1566523171-3183", Name:"frontend", UID:"b7870f8d-1c2e-4087-b030-a85950ce2488", APIVersion:"v1", ResourceVersion:"1684", FieldPath:"&#