Result: FAILURE
Tests: 1 failed / 2469 succeeded
Started: 2019-08-12 14:15
Elapsed: 27m32s
Revision:
Builder: gke-prow-ssd-pool-1a225945-ld64
links: {u'resultstore': {u'url': u'https://source.cloud.google.com/results/invocations/3d2db85f-e317-4fd7-9af7-0dd53f1390e6/targets/test'}}
pod: 8b90ba6e-bd0b-11e9-8c5c-1a8ca1133b15
resultstore: https://source.cloud.google.com/results/invocations/3d2db85f-e317-4fd7-9af7-0dd53f1390e6/targets/test
infra-commit: 3d3631683
repo: k8s.io/kubernetes
repo-commit: ae457448d6963ffbb6427afc8c87598f89fadf71
repos: {u'k8s.io/kubernetes': u'master'}

Test Failures


k8s.io/kubernetes/test/integration/scheduler TestPreemptWithPermitPlugin 1m4s

go test -v k8s.io/kubernetes/test/integration/scheduler -run TestPreemptWithPermitPlugin$
=== RUN   TestPreemptWithPermitPlugin
I0812 14:38:10.605569  110675 services.go:33] Network range for service cluster IPs is unspecified. Defaulting to {10.0.0.0 ffffff00}.
I0812 14:38:10.605612  110675 services.go:45] Setting service IP to "10.0.0.1" (read-write).
I0812 14:38:10.605625  110675 master.go:278] Node port range unspecified. Defaulting to 30000-32767.
I0812 14:38:10.605636  110675 master.go:234] Using reconciler: 
I0812 14:38:10.607965  110675 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.608123  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.608364  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.608625  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.608829  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.609711  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.610032  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.610058  110675 store.go:1342] Monitoring podtemplates count at <storage-prefix>//podtemplates
I0812 14:38:10.610097  110675 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.610290  110675 reflector.go:160] Listing and watching *core.PodTemplate from storage/cacher.go:/podtemplates
I0812 14:38:10.610351  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.610363  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.610398  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.610773  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.612515  110675 watch_cache.go:405] Replace watchCache (rev: 28552) 
I0812 14:38:10.612525  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.612892  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.613684  110675 store.go:1342] Monitoring events count at <storage-prefix>//events
I0812 14:38:10.613735  110675 reflector.go:160] Listing and watching *core.Event from storage/cacher.go:/events
I0812 14:38:10.613731  110675 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.613827  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.613838  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.614150  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.614223  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.614600  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.614621  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.615800  110675 watch_cache.go:405] Replace watchCache (rev: 28552) 
I0812 14:38:10.617140  110675 store.go:1342] Monitoring limitranges count at <storage-prefix>//limitranges
I0812 14:38:10.617194  110675 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.617275  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.617288  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.617648  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.618664  110675 reflector.go:160] Listing and watching *core.LimitRange from storage/cacher.go:/limitranges
I0812 14:38:10.618851  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.619212  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.619333  110675 store.go:1342] Monitoring resourcequotas count at <storage-prefix>//resourcequotas
I0812 14:38:10.619518  110675 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.619632  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.619643  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.619678  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.619709  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.619720  110675 reflector.go:160] Listing and watching *core.ResourceQuota from storage/cacher.go:/resourcequotas
I0812 14:38:10.619881  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.620204  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.620314  110675 store.go:1342] Monitoring secrets count at <storage-prefix>//secrets
I0812 14:38:10.620516  110675 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.620629  110675 reflector.go:160] Listing and watching *core.Secret from storage/cacher.go:/secrets
I0812 14:38:10.620682  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.620693  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.620724  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.620582  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.620792  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.621386  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.621468  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.621621  110675 store.go:1342] Monitoring persistentvolumes count at <storage-prefix>//persistentvolumes
I0812 14:38:10.621744  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.621797  110675 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.621859  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.621876  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.621886  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.621916  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.621970  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.621997  110675 reflector.go:160] Listing and watching *core.PersistentVolume from storage/cacher.go:/persistentvolumes
I0812 14:38:10.622273  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.622422  110675 store.go:1342] Monitoring persistentvolumeclaims count at <storage-prefix>//persistentvolumeclaims
I0812 14:38:10.622593  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.622666  110675 reflector.go:160] Listing and watching *core.PersistentVolumeClaim from storage/cacher.go:/persistentvolumeclaims
I0812 14:38:10.622714  110675 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.622784  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.622796  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.622827  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.622884  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.623881  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.623977  110675 store.go:1342] Monitoring configmaps count at <storage-prefix>//configmaps
I0812 14:38:10.624107  110675 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.624192  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.624206  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.624236  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.624275  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.624311  110675 reflector.go:160] Listing and watching *core.ConfigMap from storage/cacher.go:/configmaps
I0812 14:38:10.624467  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.624577  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.624624  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.624842  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.624871  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.624962  110675 store.go:1342] Monitoring namespaces count at <storage-prefix>//namespaces
I0812 14:38:10.624992  110675 reflector.go:160] Listing and watching *core.Namespace from storage/cacher.go:/namespaces
I0812 14:38:10.625121  110675 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.625192  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.625204  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.625235  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.625285  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.625770  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.625900  110675 store.go:1342] Monitoring endpoints count at <storage-prefix>//services/endpoints
I0812 14:38:10.626040  110675 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.626098  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.626109  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.626146  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.626188  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.626219  110675 reflector.go:160] Listing and watching *core.Endpoints from storage/cacher.go:/services/endpoints
I0812 14:38:10.626457  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.627165  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.627277  110675 store.go:1342] Monitoring nodes count at <storage-prefix>//minions
I0812 14:38:10.627430  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.627494  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.627504  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.627535  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.627608  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.627641  110675 reflector.go:160] Listing and watching *core.Node from storage/cacher.go:/minions
I0812 14:38:10.627827  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.629018  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.629384  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.629642  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.629719  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.629968  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.630090  110675 store.go:1342] Monitoring pods count at <storage-prefix>//pods
I0812 14:38:10.630285  110675 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.630354  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.630365  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.630399  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.630463  110675 reflector.go:160] Listing and watching *core.Pod from storage/cacher.go:/pods
I0812 14:38:10.630505  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.630539  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.630665  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.630895  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.630955  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.631007  110675 store.go:1342] Monitoring serviceaccounts count at <storage-prefix>//serviceaccounts
I0812 14:38:10.631136  110675 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.631194  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.631205  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.631237  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.631360  110675 reflector.go:160] Listing and watching *core.ServiceAccount from storage/cacher.go:/serviceaccounts
I0812 14:38:10.632271  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.633455  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.634979  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.635766  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.635972  110675 store.go:1342] Monitoring services count at <storage-prefix>//services/specs
I0812 14:38:10.636013  110675 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.636106  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.636118  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.636157  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.636205  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.636236  110675 reflector.go:160] Listing and watching *core.Service from storage/cacher.go:/services/specs
I0812 14:38:10.636437  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.637032  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.637130  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.637207  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.638275  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.639454  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.641266  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.641347  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.641841  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.641934  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.642063  110675 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.642161  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.642267  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.642492  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.642675  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.643421  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.643816  110675 store.go:1342] Monitoring replicationcontrollers count at <storage-prefix>//controllers
I0812 14:38:10.644379  110675 reflector.go:160] Listing and watching *core.ReplicationController from storage/cacher.go:/controllers
I0812 14:38:10.644399  110675 storage_factory.go:285] storing bindings in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.644630  110675 storage_factory.go:285] storing componentstatuses in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.644870  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.645356  110675 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.646046  110675 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.646482  110675 watch_cache.go:405] Replace watchCache (rev: 28553) 
I0812 14:38:10.646775  110675 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.647484  110675 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.648442  110675 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.648716  110675 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.649082  110675 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.649991  110675 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.650801  110675 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.651169  110675 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.653201  110675 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.655155  110675 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.657458  110675 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.658030  110675 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.659508  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.660074  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.660390  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.660811  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.661153  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.661451  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.661769  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.664944  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.665367  110675 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.666573  110675 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.667805  110675 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.668402  110675 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.668842  110675 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.669719  110675 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.670162  110675 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.671191  110675 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.677148  110675 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.678322  110675 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.679604  110675 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.680341  110675 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.680771  110675 master.go:418] Skipping disabled API group "auditregistration.k8s.io".
I0812 14:38:10.680999  110675 master.go:426] Enabling API group "authentication.k8s.io".
I0812 14:38:10.681173  110675 master.go:426] Enabling API group "authorization.k8s.io".
I0812 14:38:10.681575  110675 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.681992  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.682277  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.682440  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.682856  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.684246  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.684414  110675 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0812 14:38:10.684633  110675 reflector.go:160] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0812 14:38:10.684645  110675 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.684722  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.684732  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.684774  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.684831  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.685189  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.686088  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.686734  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.687017  110675 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0812 14:38:10.688848  110675 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.688984  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.689008  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.689054  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.687345  110675 reflector.go:160] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0812 14:38:10.689386  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.687995  110675 watch_cache.go:405] Replace watchCache (rev: 28554) 
I0812 14:38:10.689873  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.689989  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.690032  110675 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I0812 14:38:10.690054  110675 master.go:426] Enabling API group "autoscaling".
I0812 14:38:10.690216  110675 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.690297  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.690307  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.690339  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.690387  110675 reflector.go:160] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I0812 14:38:10.690474  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.690973  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.691192  110675 store.go:1342] Monitoring jobs.batch count at <storage-prefix>//jobs
I0812 14:38:10.691427  110675 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.691504  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.691514  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.691573  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.691634  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.691658  110675 watch_cache.go:405] Replace watchCache (rev: 28554) 
I0812 14:38:10.691809  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.691829  110675 reflector.go:160] Listing and watching *batch.Job from storage/cacher.go:/jobs
I0812 14:38:10.692794  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.692826  110675 watch_cache.go:405] Replace watchCache (rev: 28554) 
I0812 14:38:10.692981  110675 store.go:1342] Monitoring cronjobs.batch count at <storage-prefix>//cronjobs
I0812 14:38:10.693009  110675 master.go:426] Enabling API group "batch".
I0812 14:38:10.693145  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.693183  110675 reflector.go:160] Listing and watching *batch.CronJob from storage/cacher.go:/cronjobs
I0812 14:38:10.693174  110675 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.693250  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.693261  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.693292  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.693425  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.693906  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.694044  110675 store.go:1342] Monitoring certificatesigningrequests.certificates.k8s.io count at <storage-prefix>//certificatesigningrequests
I0812 14:38:10.694107  110675 master.go:426] Enabling API group "certificates.k8s.io".
I0812 14:38:10.694250  110675 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.694356  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.694368  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.694428  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.694485  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.694563  110675 reflector.go:160] Listing and watching *certificates.CertificateSigningRequest from storage/cacher.go:/certificatesigningrequests
I0812 14:38:10.694739  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.698448  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.698485  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.698600  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.701589  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.701869  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.702246  110675 store.go:1342] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0812 14:38:10.702311  110675 reflector.go:160] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0812 14:38:10.704507  110675 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.704687  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.704700  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.705166  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.705232  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.705857  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.706043  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.706147  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.706221  110675 store.go:1342] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I0812 14:38:10.706239  110675 master.go:426] Enabling API group "coordination.k8s.io".
I0812 14:38:10.706429  110675 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.706516  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.706528  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.706587  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.706641  110675 reflector.go:160] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I0812 14:38:10.706916  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.707254  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.707374  110675 store.go:1342] Monitoring ingresses.networking.k8s.io count at <storage-prefix>//ingress
I0812 14:38:10.707397  110675 master.go:426] Enabling API group "extensions".
I0812 14:38:10.707403  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.707450  110675 reflector.go:160] Listing and watching *networking.Ingress from storage/cacher.go:/ingress
I0812 14:38:10.707586  110675 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.707663  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.707674  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.707707  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.707863  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.708187  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.708294  110675 store.go:1342] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I0812 14:38:10.708340  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.708466  110675 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.708526  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.708537  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.708664  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.708933  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.709036  110675 reflector.go:160] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I0812 14:38:10.709356  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.709486  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.710169  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.710427  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.710491  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.710568  110675 store.go:1342] Monitoring ingresses.networking.k8s.io count at <storage-prefix>//ingress
I0812 14:38:10.710588  110675 master.go:426] Enabling API group "networking.k8s.io".
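The repeating "parsed scheme" / ccResolverWrapper / "balancer pin" lines are the etcd client being dialed once per store; the gRPC resolver and balancer inside clientv3 emit them as each connection settles on 127.0.0.1:2379. A minimal sketch of that dial, assuming the go.etcd.io/etcd/clientv3 package, purely for illustration:

package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Dialing the test etcd; clientv3's gRPC machinery produces the
	// scheme/resolver/balancer log lines seen above during this call.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println("connected to", cli.Endpoints())
}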
I0812 14:38:10.710638  110675 storage_factory.go:285] storing runtimeclasses.node.k8s.io in node.k8s.io/v1beta1, reading as node.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.710708  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.710720  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.710797  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.710836  110675 reflector.go:160] Listing and watching *networking.Ingress from storage/cacher.go:/ingress
I0812 14:38:10.711096  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.711452  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.711582  110675 store.go:1342] Monitoring runtimeclasses.node.k8s.io count at <storage-prefix>//runtimeclasses
I0812 14:38:10.711599  110675 master.go:426] Enabling API group "node.k8s.io".
I0812 14:38:10.711746  110675 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.711841  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.711851  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.711887  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.711940  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.711973  110675 reflector.go:160] Listing and watching *node.RuntimeClass from storage/cacher.go:/runtimeclasses
I0812 14:38:10.712222  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.712945  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.712947  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.713062  110675 store.go:1342] Monitoring poddisruptionbudgets.policy count at <storage-prefix>//poddisruptionbudgets
I0812 14:38:10.713217  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.713243  110675 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.713266  110675 reflector.go:160] Listing and watching *policy.PodDisruptionBudget from storage/cacher.go:/poddisruptionbudgets
I0812 14:38:10.713310  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.713322  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.713352  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.713488  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.714014  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.714140  110675 store.go:1342] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicy
I0812 14:38:10.714163  110675 master.go:426] Enabling API group "policy".
I0812 14:38:10.714194  110675 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.714255  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.714266  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.714296  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.714334  110675 reflector.go:160] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicy
I0812 14:38:10.714370  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.714512  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.714798  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.714840  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.715310  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.715524  110675 store.go:1342] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0812 14:38:10.715692  110675 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.715719  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.715762  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.715774  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.715816  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.715816  110675 reflector.go:160] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0812 14:38:10.715909  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.715947  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.716267  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.716375  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.716402  110675 reflector.go:160] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0812 14:38:10.716385  110675 store.go:1342] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0812 14:38:10.716443  110675 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.716513  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.716522  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.716592  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.716681  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.717000  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.717072  110675 store.go:1342] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0812 14:38:10.717095  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.717219  110675 reflector.go:160] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0812 14:38:10.717507  110675 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.717598  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.717609  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.717640  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.717718  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.718589  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.718627  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.718678  110675 store.go:1342] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0812 14:38:10.718713  110675 reflector.go:160] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0812 14:38:10.718721  110675 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.718830  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.718842  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.718871  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.718918  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.718969  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.719311  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.719853  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.719893  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.719982  110675 store.go:1342] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I0812 14:38:10.720329  110675 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.720392  110675 reflector.go:160] Listing and watching *rbac.Role from storage/cacher.go:/roles
I0812 14:38:10.720403  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.720415  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.720445  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.720347  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.720673  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.720944  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.721079  110675 store.go:1342] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I0812 14:38:10.721195  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.721321  110675 reflector.go:160] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I0812 14:38:10.721111  110675 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.721703  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.721720  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.721747  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.721818  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.722019  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.722282  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.722395  110675 store.go:1342] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I0812 14:38:10.722533  110675 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.722583  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.722622  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.722633  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.722659  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.722695  110675 reflector.go:160] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I0812 14:38:10.722753  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.722842  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.722875  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.723272  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.723398  110675 store.go:1342] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I0812 14:38:10.723430  110675 master.go:426] Enabling API group "rbac.authorization.k8s.io".
I0812 14:38:10.723532  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.723754  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.723896  110675 reflector.go:160] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I0812 14:38:10.725068  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
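The reflector.go "Listing and watching" lines are the cacher's internal reflector priming each watch cache from storage, which is why every one is followed by a "Replace watchCache" line at the current revision. The same list-and-watch pattern is what client-go exposes to API clients; a client-side sketch of that pattern for illustration only (it watches an apiserver rather than etcd, and the kubeconfig path is hypothetical):

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // hypothetical path
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	// List+watch pods and mirror them into a local store, the same way the
	// apiserver's cacher mirrors each resource into its watch cache.
	lw := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(lw, &v1.Pod{}, store, 30*time.Second)

	stop := make(chan struct{})
	go reflector.Run(stop)

	time.Sleep(5 * time.Second)
	fmt.Println("cached pods:", len(store.List()))
	close(stop)
}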
I0812 14:38:10.725506  110675 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.725776  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.725794  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.725838  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.725907  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.727676  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.727751  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.727844  110675 store.go:1342] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0812 14:38:10.727947  110675 reflector.go:160] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0812 14:38:10.728067  110675 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.728139  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.728149  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.728182  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.728339  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.728744  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.728839  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.728911  110675 store.go:1342] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I0812 14:38:10.728928  110675 master.go:426] Enabling API group "scheduling.k8s.io".
I0812 14:38:10.729062  110675 master.go:418] Skipping disabled API group "settings.k8s.io".
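The master.go "Enabling API group" and "Skipping disabled API group" lines reflect the resource configuration the test apiserver is built with. A minimal sketch, not taken from the test, of expressing the same decisions with the apiserver's ResourceConfig type; the group/versions used here are only ones named in the log:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	serverstorage "k8s.io/apiserver/pkg/server/storage"
)

func main() {
	cfg := serverstorage.NewResourceConfig()
	// scheduling.k8s.io/v1 is enabled; settings.k8s.io stays disabled,
	// matching the two master.go lines above.
	cfg.EnableVersions(schema.GroupVersion{Group: "scheduling.k8s.io", Version: "v1"})
	cfg.DisableVersions(schema.GroupVersion{Group: "settings.k8s.io", Version: "v1alpha1"})

	fmt.Println(cfg.VersionEnabled(schema.GroupVersion{Group: "settings.k8s.io", Version: "v1alpha1"})) // false
}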
I0812 14:38:10.729170  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.729200  110675 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.729275  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.729287  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.729317  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.729332  110675 reflector.go:160] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I0812 14:38:10.729518  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.730150  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.730188  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.730319  110675 store.go:1342] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0812 14:38:10.730329  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.730460  110675 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.730502  110675 reflector.go:160] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0812 14:38:10.730578  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.730592  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.730631  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.730749  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.731109  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.731221  110675 store.go:1342] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0812 14:38:10.731253  110675 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.731314  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.731330  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.731392  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.731443  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.731474  110675 reflector.go:160] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0812 14:38:10.731735  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.732091  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.732209  110675 store.go:1342] Monitoring csinodes.storage.k8s.io count at <storage-prefix>//csinodes
I0812 14:38:10.732241  110675 storage_factory.go:285] storing csidrivers.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.732356  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.732406  110675 reflector.go:160] Listing and watching *storage.CSINode from storage/cacher.go:/csinodes
I0812 14:38:10.732501  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.732515  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.732566  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.732903  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.733297  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.733444  110675 store.go:1342] Monitoring csidrivers.storage.k8s.io count at <storage-prefix>//csidrivers
I0812 14:38:10.733612  110675 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.733675  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.733687  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.733720  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.733767  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.733798  110675 reflector.go:160] Listing and watching *storage.CSIDriver from storage/cacher.go:/csidrivers
I0812 14:38:10.734026  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.734744  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.734833  110675 store.go:1342] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I0812 14:38:10.734972  110675 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.735034  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.735046  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.735100  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.735181  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.735226  110675 reflector.go:160] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I0812 14:38:10.735467  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.735790  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.735846  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.735882  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.735887  110675 store.go:1342] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I0812 14:38:10.735909  110675 master.go:426] Enabling API group "storage.k8s.io".
I0812 14:38:10.736008  110675 reflector.go:160] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I0812 14:38:10.736036  110675 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.736099  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.736111  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.736139  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.736213  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.737305  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.737772  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.737804  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.737841  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.738040  110675 watch_cache.go:405] Replace watchCache (rev: 28555) 
I0812 14:38:10.740982  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.741101  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.741523  110675 store.go:1342] Monitoring deployments.apps count at <storage-prefix>//deployments
I0812 14:38:10.741625  110675 reflector.go:160] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I0812 14:38:10.741730  110675 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.741988  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.742009  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.742188  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.742247  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.742800  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.743040  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.743367  110675 store.go:1342] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I0812 14:38:10.743480  110675 reflector.go:160] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I0812 14:38:10.743745  110675 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.744057  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.744190  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.744322  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.744487  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.744622  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.744649  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.747077  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.747332  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.747844  110675 store.go:1342] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I0812 14:38:10.747975  110675 reflector.go:160] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I0812 14:38:10.748312  110675 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.748420  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.748436  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.748588  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.748684  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.749102  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.749308  110675 store.go:1342] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I0812 14:38:10.749410  110675 reflector.go:160] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I0812 14:38:10.749641  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.749900  110675 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.749998  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.750332  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.750644  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.751514  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.751768  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.752272  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.752393  110675 store.go:1342] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I0812 14:38:10.752415  110675 master.go:426] Enabling API group "apps".
I0812 14:38:10.752454  110675 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.752534  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.752577  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.752610  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.752707  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.752740  110675 reflector.go:160] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I0812 14:38:10.753013  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.754184  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.754325  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.754359  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.754489  110675 store.go:1342] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0812 14:38:10.754529  110675 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.754617  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.754627  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.754658  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.754698  110675 reflector.go:160] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0812 14:38:10.754882  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.755200  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.755320  110675 store.go:1342] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0812 14:38:10.755354  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.755356  110675 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.755408  110675 reflector.go:160] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0812 14:38:10.755449  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.755460  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.755486  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.755734  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.756573  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.756597  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.757430  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.758318  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.758432  110675 store.go:1342] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I0812 14:38:10.758465  110675 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.758533  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.758566  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.758600  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.758679  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.758970  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.759158  110675 reflector.go:160] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I0812 14:38:10.759611  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.759728  110675 store.go:1342] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I0812 14:38:10.759742  110675 master.go:426] Enabling API group "admissionregistration.k8s.io".
I0812 14:38:10.759778  110675 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.759808  110675 reflector.go:160] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I0812 14:38:10.759842  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.760014  110675 client.go:354] parsed scheme: ""
I0812 14:38:10.760025  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:10.760054  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:10.760130  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.760183  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.760783  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:10.760823  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:10.760899  110675 store.go:1342] Monitoring events count at <storage-prefix>//events
I0812 14:38:10.760915  110675 master.go:426] Enabling API group "events.k8s.io".
I0812 14:38:10.761125  110675 storage_factory.go:285] storing tokenreviews.authentication.k8s.io in authentication.k8s.io/v1, reading as authentication.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.761323  110675 storage_factory.go:285] storing tokenreviews.authentication.k8s.io in authentication.k8s.io/v1, reading as authentication.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.761408  110675 reflector.go:160] Listing and watching *core.Event from storage/cacher.go:/events
I0812 14:38:10.761608  110675 storage_factory.go:285] storing localsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.761708  110675 storage_factory.go:285] storing selfsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.761828  110675 storage_factory.go:285] storing selfsubjectrulesreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.761915  110675 storage_factory.go:285] storing subjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.762073  110675 storage_factory.go:285] storing localsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.762162  110675 storage_factory.go:285] storing selfsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.762241  110675 storage_factory.go:285] storing selfsubjectrulesreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.762352  110675 storage_factory.go:285] storing subjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
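The tokenreviews and the *accessreviews/*rulesreviews resources above get storage_factory lines but no "Monitoring ... count" or reflector lines: they are virtual, request-scoped APIs, so nothing is persisted in etcd for them. A minimal sketch of exercising one of them from a client, assuming a recent client-go with context-taking Create signatures (v0.18+; at this commit the method took no context), with a hypothetical kubeconfig path:

package main

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // hypothetical path
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	// Ask the apiserver whether the current identity may get pods in "default";
	// the answer is computed per request, nothing is stored.
	sar := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: "default",
				Verb:      "get",
				Resource:  "pods",
			},
		},
	}
	resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed:", resp.Status.Allowed)
}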
I0812 14:38:10.763074  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.763206  110675 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.763448  110675 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.763813  110675 watch_cache.go:405] Replace watchCache (rev: 28556) 
I0812 14:38:10.764911  110675 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.765168  110675 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.766274  110675 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.766518  110675 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.767422  110675 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.767986  110675 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.768818  110675 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.769180  110675 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0812 14:38:10.769305  110675 genericapiserver.go:390] Skipping API batch/v2alpha1 because it has no resources.
I0812 14:38:10.770071  110675 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.770267  110675 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.770755  110675 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.771827  110675 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.772844  110675 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.773760  110675 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.774192  110675 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.775150  110675 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.776219  110675 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.776520  110675 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.777245  110675 storage_factory.go:285] storing runtimeclasses.node.k8s.io in node.k8s.io/v1beta1, reading as node.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0812 14:38:10.777322  110675 genericapiserver.go:390] Skipping API node.k8s.io/v1alpha1 because it has no resources.
I0812 14:38:10.778122  110675 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.778410  110675 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.779004  110675 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.779683  110675 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.780154  110675 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.781329  110675 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.782214  110675 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.782944  110675 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.783413  110675 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.784093  110675 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.785038  110675 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0812 14:38:10.785105  110675 genericapiserver.go:390] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
I0812 14:38:10.785851  110675 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.786600  110675 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0812 14:38:10.786676  110675 genericapiserver.go:390] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
I0812 14:38:10.787399  110675 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.788416  110675 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.788903  110675 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.789998  110675 storage_factory.go:285] storing csidrivers.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.790817  110675 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.791587  110675 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.792331  110675 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0812 14:38:10.792572  110675 genericapiserver.go:390] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
I0812 14:38:10.794106  110675 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.795194  110675 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.795694  110675 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.796895  110675 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.797145  110675 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.797839  110675 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.798943  110675 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.799405  110675 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.799767  110675 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.800640  110675 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.801014  110675 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.801433  110675 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W0812 14:38:10.801629  110675 genericapiserver.go:390] Skipping API apps/v1beta2 because it has no resources.
W0812 14:38:10.801711  110675 genericapiserver.go:390] Skipping API apps/v1beta1 because it has no resources.
I0812 14:38:10.803057  110675 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.803961  110675 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.804847  110675 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.805896  110675 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.806869  110675 storage_factory.go:285] storing events.events.k8s.io in events.k8s.io/v1beta1, reading as events.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"90fdce5c-7965-43bf-b2a2-6dc6eab468fb", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I0812 14:38:10.810208  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:10.810367  110675 healthz.go:169] healthz check poststarthook/bootstrap-controller failed: not finished
I0812 14:38:10.810447  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:10.810513  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:10.810635  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:10.810700  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:10.810854  110675 httplog.go:90] GET /healthz: (761.597µs) 0 [Go-http-client/1.1 127.0.0.1:56038]
I0812 14:38:10.812520  110675 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (2.345786ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:10.818845  110675 httplog.go:90] GET /api/v1/services: (1.704382ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:10.828186  110675 httplog.go:90] GET /api/v1/services: (5.552527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:10.831163  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:10.831208  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:10.831220  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:10.831230  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:10.831238  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:10.831404  110675 httplog.go:90] GET /healthz: (223.952µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56038]
I0812 14:38:10.834843  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (3.30598ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:10.835017  110675 httplog.go:90] GET /api/v1/services: (2.52212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56044]
I0812 14:38:10.836461  110675 httplog.go:90] GET /api/v1/services: (3.975394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56042]
I0812 14:38:10.840308  110675 httplog.go:90] POST /api/v1/namespaces: (2.244766ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:10.842479  110675 httplog.go:90] GET /api/v1/namespaces/kube-public: (1.35913ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:10.846537  110675 httplog.go:90] POST /api/v1/namespaces: (1.780898ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:10.848662  110675 httplog.go:90] GET /api/v1/namespaces/kube-node-lease: (1.500998ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:10.853462  110675 httplog.go:90] POST /api/v1/namespaces: (4.015612ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:10.913585  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:10.913631  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:10.913645  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:10.913655  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:10.913664  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:10.913707  110675 httplog.go:90] GET /healthz: (310.853µs) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:10.933420  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:10.933455  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:10.933467  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:10.933478  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:10.933488  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:10.933520  110675 httplog.go:90] GET /healthz: (297.846µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.012184  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.012223  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.012236  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.012245  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.012253  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.012288  110675 httplog.go:90] GET /healthz: (248.469µs) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:11.033329  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.033362  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.033372  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.033380  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.033386  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.033434  110675 httplog.go:90] GET /healthz: (241.787µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.112260  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.112308  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.112321  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.112330  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.112338  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.112371  110675 httplog.go:90] GET /healthz: (278.508µs) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:11.133283  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.133330  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.133342  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.133351  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.133362  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.133394  110675 httplog.go:90] GET /healthz: (236.534µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.212832  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.212879  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.212892  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.212902  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.212911  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.212944  110675 httplog.go:90] GET /healthz: (271.908µs) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:11.233499  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.233540  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.233575  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.233587  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.233595  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.233626  110675 httplog.go:90] GET /healthz: (299.907µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.312128  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.312170  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.312184  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.312194  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.312202  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.312233  110675 httplog.go:90] GET /healthz: (256.417µs) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:11.333417  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.333455  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.333469  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.333479  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.333488  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.333540  110675 httplog.go:90] GET /healthz: (342.337µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.412191  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.412230  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.412243  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.412253  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.412261  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.412297  110675 httplog.go:90] GET /healthz: (266.473µs) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:11.440975  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.441025  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.441038  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.441048  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.441056  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.441102  110675 httplog.go:90] GET /healthz: (330.207µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.515170  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.515218  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.515233  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.515246  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.515256  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.515357  110675 httplog.go:90] GET /healthz: (392.043µs) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:11.533307  110675 healthz.go:169] healthz check etcd failed: etcd client connection not yet established
I0812 14:38:11.533355  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.533368  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.533379  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.533388  110675 healthz.go:183] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.533419  110675 httplog.go:90] GET /healthz: (267.493µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.606325  110675 client.go:354] parsed scheme: ""
I0812 14:38:11.606363  110675 client.go:354] scheme "" not registered, fallback to default scheme
I0812 14:38:11.606413  110675 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I0812 14:38:11.606473  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:11.607485  110675 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I0812 14:38:11.607592  110675 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I0812 14:38:11.613681  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.613716  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.613727  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.613736  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.613783  110675 httplog.go:90] GET /healthz: (1.751804ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:11.634599  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.634644  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.634655  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.634664  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.634734  110675 httplog.go:90] GET /healthz: (1.446909ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.713453  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.713492  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.713504  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.713513  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.713606  110675 httplog.go:90] GET /healthz: (1.559594ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:11.734703  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.734739  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.734750  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.734759  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.734803  110675 httplog.go:90] GET /healthz: (1.571107ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.811982  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.024712ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.812419  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.517371ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56258]
I0812 14:38:11.813796  110675 httplog.go:90] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-node-critical: (3.820068ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56038]
I0812 14:38:11.815375  110675 httplog.go:90] GET /api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication: (2.992445ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.816033  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.816079  110675 healthz.go:169] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I0812 14:38:11.816096  110675 healthz.go:169] healthz check poststarthook/ca-registration failed: not finished
I0812 14:38:11.816105  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
healthz check failed
I0812 14:38:11.816146  110675 httplog.go:90] GET /healthz: (2.720164ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:11.816375  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.588733ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56258]
I0812 14:38:11.817183  110675 httplog.go:90] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (2.712464ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56038]
I0812 14:38:11.817394  110675 storage_scheduling.go:119] created PriorityClass system-node-critical with value 2000001000
I0812 14:38:11.819285  110675 httplog.go:90] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-cluster-critical: (1.513052ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56038]
I0812 14:38:11.819500  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (2.039978ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56258]
I0812 14:38:11.823070  110675 httplog.go:90] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (2.895599ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56038]
I0812 14:38:11.823219  110675 httplog.go:90] POST /api/v1/namespaces/kube-system/configmaps: (6.448428ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.823453  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (2.768837ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56258]
I0812 14:38:11.823687  110675 storage_scheduling.go:119] created PriorityClass system-cluster-critical with value 2000000000
I0812 14:38:11.823705  110675 storage_scheduling.go:128] all system priority classes are created successfully or already exist.
I0812 14:38:11.824962  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (1.086259ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.826256  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (916.733µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.827858  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (1.015319ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.829241  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (956.229µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.831193  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (944.326µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.833084  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin: (1.11712ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.834075  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.834104  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:11.834135  110675 httplog.go:90] GET /healthz: (1.060719ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.835898  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.047375ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.836126  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/cluster-admin
I0812 14:38:11.837676  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:discovery: (1.293075ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.840364  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.200487ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.840633  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:discovery
I0812 14:38:11.842195  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:basic-user: (1.228692ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.845331  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.352127ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.845702  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:basic-user
I0812 14:38:11.846989  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:public-info-viewer: (1.089448ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.849949  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.541604ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.850159  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:public-info-viewer
I0812 14:38:11.851466  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (1.125339ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.853869  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.803818ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.854212  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/admin
I0812 14:38:11.855749  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (1.129567ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.857752  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.544238ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.858301  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/edit
I0812 14:38:11.859895  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (1.145625ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.862465  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.955773ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.862757  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/view
I0812 14:38:11.864162  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (1.026454ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.866239  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.743967ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.866438  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-admin
I0812 14:38:11.867908  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (1.008033ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.870761  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.248084ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.871252  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-edit
I0812 14:38:11.872719  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (1.104791ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.880423  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.407177ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.880772  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-view
I0812 14:38:11.882399  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:heapster: (1.266109ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.885087  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.93314ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.885385  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:heapster
I0812 14:38:11.886926  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node: (1.196634ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.891956  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.092602ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.892900  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:node
I0812 14:38:11.894832  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-problem-detector: (1.431167ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.896928  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.613411ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.897321  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:node-problem-detector
I0812 14:38:11.898431  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-proxier: (904.316µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.900294  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.410725ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.904689  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:node-proxier
I0812 14:38:11.908413  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kubelet-api-admin: (3.131146ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.911361  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.347338ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.912104  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kubelet-api-admin
I0812 14:38:11.913716  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.913740  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:11.913789  110675 httplog.go:90] GET /healthz: (1.724514ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:11.914078  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-bootstrapper: (1.774038ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.916776  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.266967ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.917006  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:node-bootstrapper
I0812 14:38:11.919442  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:auth-delegator: (2.190976ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.925436  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (5.479762ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.925888  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:auth-delegator
I0812 14:38:11.938818  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-aggregator: (12.572709ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:11.939010  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:11.939030  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:11.939060  110675 httplog.go:90] GET /healthz: (5.759176ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.941540  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.953745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.944262  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kube-aggregator
I0812 14:38:11.945686  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-controller-manager: (1.16621ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.950759  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.536965ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.951099  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kube-controller-manager
I0812 14:38:11.953270  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-dns: (1.460828ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.956116  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.241398ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.956390  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kube-dns
I0812 14:38:11.957856  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:persistent-volume-provisioner: (1.180581ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.960377  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.00745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.960677  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:persistent-volume-provisioner
I0812 14:38:11.962070  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-attacher: (1.144627ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.964577  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.079505ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.964812  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:csi-external-attacher
I0812 14:38:11.966159  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:nodeclient: (1.147596ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.968419  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.868036ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.968670  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:nodeclient
I0812 14:38:11.970244  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient: (1.047488ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.973475  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.684042ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.973742  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
I0812 14:38:11.975258  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:volume-scheduler: (1.336994ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.979072  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.151286ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.979588  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:volume-scheduler
I0812 14:38:11.980918  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-scheduler: (1.043073ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.984051  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.574015ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.984600  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kube-scheduler
I0812 14:38:11.985982  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-provisioner: (1.122108ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.988962  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.322276ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.989327  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:csi-external-provisioner
I0812 14:38:11.990979  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:attachdetach-controller: (1.177033ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.996149  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.721391ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:11.996718  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0812 14:38:11.998791  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:clusterrole-aggregation-controller: (1.566861ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.002845  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.482971ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.003189  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0812 14:38:12.004854  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:cronjob-controller: (1.381949ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.007611  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.107914ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.007853  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0812 14:38:12.009299  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:daemon-set-controller: (1.0413ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.011785  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.011085ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.012050  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0812 14:38:12.014894  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:deployment-controller: (2.594678ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.015346  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.015374  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.015404  110675 httplog.go:90] GET /healthz: (1.852428ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:12.018038  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.401993ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.018433  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:deployment-controller
I0812 14:38:12.020905  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:disruption-controller: (1.801058ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.027895  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (5.966794ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.028505  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:disruption-controller
I0812 14:38:12.031153  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:endpoint-controller: (2.211363ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.069161  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.069188  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.069230  110675 httplog.go:90] GET /healthz: (36.026351ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.071407  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (39.690832ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.071783  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0812 14:38:12.074762  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:expand-controller: (2.538761ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.077819  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.349379ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.078351  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:expand-controller
I0812 14:38:12.080212  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:generic-garbage-collector: (1.578828ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.083432  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.687081ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.083802  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0812 14:38:12.086135  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:horizontal-pod-autoscaler: (2.116676ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.088968  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.281565ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.089380  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0812 14:38:12.091108  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:job-controller: (1.424049ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.099314  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (7.478933ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.099644  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:job-controller
I0812 14:38:12.103401  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:namespace-controller: (3.48492ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.110852  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (7.003353ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.111160  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:namespace-controller
I0812 14:38:12.113237  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:node-controller: (1.682314ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.113863  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.113894  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.113927  110675 httplog.go:90] GET /healthz: (1.698545ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:12.117013  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.979388ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.117305  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:node-controller
I0812 14:38:12.118771  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:persistent-volume-binder: (1.149085ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.125515  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (5.365684ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.125890  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0812 14:38:12.128271  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pod-garbage-collector: (2.133314ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.131368  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.407673ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.131656  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0812 14:38:12.138144  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replicaset-controller: (6.230046ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.139032  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.139056  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.139134  110675 httplog.go:90] GET /healthz: (1.182864ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.143694  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.948689ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.143988  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0812 14:38:12.146450  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replication-controller: (2.136898ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.149170  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.184477ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.149902  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:replication-controller
I0812 14:38:12.153451  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:resourcequota-controller: (3.3158ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.158497  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.794184ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.159138  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0812 14:38:12.161802  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:route-controller: (1.306126ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.165105  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.805546ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.165566  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:route-controller
I0812 14:38:12.168522  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-account-controller: (2.766465ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.175320  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (6.302707ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.176472  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:service-account-controller
I0812 14:38:12.180263  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-controller: (3.389223ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.184078  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.179697ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.184340  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:service-controller
I0812 14:38:12.186369  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:statefulset-controller: (1.786123ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.190704  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.777239ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.191047  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0812 14:38:12.192523  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:ttl-controller: (1.269582ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.195069  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.918713ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.195418  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:ttl-controller
I0812 14:38:12.197875  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:certificate-controller: (2.226182ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.201141  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.19387ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.201369  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:certificate-controller
I0812 14:38:12.204386  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pvc-protection-controller: (2.829488ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.207525  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.28981ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.207784  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0812 14:38:12.210485  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pv-protection-controller: (2.529581ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.214504  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.546331ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.214730  110675 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I0812 14:38:12.216125  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/cluster-admin: (1.125836ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.216736  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.216783  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.216851  110675 httplog.go:90] GET /healthz: (4.809385ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:12.219442  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.764828ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.220117  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/cluster-admin
I0812 14:38:12.222789  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (2.162702ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.226424  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.824557ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.226709  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:discovery
I0812 14:38:12.228664  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:basic-user: (1.755345ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.232977  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.37851ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.233508  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:basic-user
I0812 14:38:12.234203  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.234232  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.234284  110675 httplog.go:90] GET /healthz: (1.204423ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.252826  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:public-info-viewer: (2.110525ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.273166  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.254391ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.273446  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:public-info-viewer
I0812 14:38:12.291836  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node-proxier: (1.230582ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.313303  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.313343  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.313398  110675 httplog.go:90] GET /healthz: (1.424539ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:12.313804  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.166071ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.314040  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:node-proxier
I0812 14:38:12.332742  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-controller-manager: (2.113088ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.334661  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.334694  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.334727  110675 httplog.go:90] GET /healthz: (1.072286ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.357137  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.59867ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.357431  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-controller-manager
I0812 14:38:12.371905  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-dns: (1.297423ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.392886  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.124818ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.393188  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-dns
I0812 14:38:12.412528  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-scheduler: (1.75477ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.413268  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.413301  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.413348  110675 httplog.go:90] GET /healthz: (1.431806ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:12.433940  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.244859ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.434251  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-scheduler
I0812 14:38:12.434849  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.434883  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.434920  110675 httplog.go:90] GET /healthz: (1.799367ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.452172  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:volume-scheduler: (1.511717ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.472950  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.275926ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.473264  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:volume-scheduler
I0812 14:38:12.492452  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node: (1.7316ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.513194  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.513237  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.513282  110675 httplog.go:90] GET /healthz: (1.154287ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:12.513589  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.844089ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.514200  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:node
I0812 14:38:12.532674  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:attachdetach-controller: (1.959639ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.534966  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.535005  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.535050  110675 httplog.go:90] GET /healthz: (1.388331ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.553162  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.448621ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.553412  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I0812 14:38:12.573041  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:clusterrole-aggregation-controller: (1.842836ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.593114  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.363662ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.595128  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I0812 14:38:12.612312  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:cronjob-controller: (1.67209ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.613148  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.613176  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.613520  110675 httplog.go:90] GET /healthz: (1.149545ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:12.633591  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.83307ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.633901  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:cronjob-controller
I0812 14:38:12.634090  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.634146  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.634179  110675 httplog.go:90] GET /healthz: (1.077213ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.652085  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:daemon-set-controller: (1.47627ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.674104  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.446828ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.674695  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I0812 14:38:12.692749  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:deployment-controller: (2.030887ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.713791  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.129567ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.714082  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:deployment-controller
I0812 14:38:12.714331  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.714352  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.714414  110675 httplog.go:90] GET /healthz: (1.149953ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:12.732187  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:disruption-controller: (1.543454ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.734484  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.734520  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.734601  110675 httplog.go:90] GET /healthz: (1.438607ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.753658  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.926174ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.754202  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:disruption-controller
I0812 14:38:12.773001  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:endpoint-controller: (1.513349ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.793793  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.151429ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.794120  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:endpoint-controller
I0812 14:38:12.812687  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:expand-controller: (1.978756ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.814369  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.814404  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.814443  110675 httplog.go:90] GET /healthz: (2.555819ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:12.834042  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.344481ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:12.834071  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.834121  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.834200  110675 httplog.go:90] GET /healthz: (994.228µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.834268  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:expand-controller
I0812 14:38:12.852511  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:generic-garbage-collector: (1.824017ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.874034  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.144312ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.874423  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I0812 14:38:12.892155  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:horizontal-pod-autoscaler: (1.507808ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.912898  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.912933  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.913261  110675 httplog.go:90] GET /healthz: (1.263157ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:12.914393  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.70219ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.914641  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I0812 14:38:12.931855  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:job-controller: (1.234114ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.933974  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:12.934111  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:12.934577  110675 httplog.go:90] GET /healthz: (1.46317ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.953618  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.923808ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.954227  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:job-controller
I0812 14:38:12.972618  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:namespace-controller: (1.27814ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.996494  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (5.796883ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:12.996815  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:namespace-controller
I0812 14:38:13.018184  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.018233  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.018286  110675 httplog.go:90] GET /healthz: (6.348927ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:13.018849  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:node-controller: (8.172887ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.033016  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.490818ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.033239  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:node-controller
I0812 14:38:13.034976  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.035006  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.035060  110675 httplog.go:90] GET /healthz: (1.848658ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.052499  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:persistent-volume-binder: (1.274615ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.074193  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.535543ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.074575  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I0812 14:38:13.092145  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pod-garbage-collector: (1.51923ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.113481  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.113516  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.113687  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.03309ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.113961  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I0812 14:38:13.114080  110675 httplog.go:90] GET /healthz: (2.180418ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:13.132789  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replicaset-controller: (2.146494ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.134873  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.135071  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.136225  110675 httplog.go:90] GET /healthz: (2.467133ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.153111  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.368969ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.153714  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replicaset-controller
I0812 14:38:13.172052  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replication-controller: (1.294123ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.193170  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.483248ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.193897  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replication-controller
I0812 14:38:13.211985  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:resourcequota-controller: (1.308769ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.212789  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.212819  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.212872  110675 httplog.go:90] GET /healthz: (967.234µs) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:13.233065  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.334407ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.233423  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I0812 14:38:13.234871  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.234904  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.234974  110675 httplog.go:90] GET /healthz: (1.076768ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.252635  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:route-controller: (1.995809ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.274427  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.94071ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.274831  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:route-controller
I0812 14:38:13.292003  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-account-controller: (1.401408ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.312853  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.236163ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.313352  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.313400  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.313434  110675 httplog.go:90] GET /healthz: (1.085128ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:13.313635  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-account-controller
I0812 14:38:13.332365  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-controller: (1.684521ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.335445  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.335481  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.335522  110675 httplog.go:90] GET /healthz: (2.156641ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.353813  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.088455ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.354090  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-controller
I0812 14:38:13.372278  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:statefulset-controller: (1.627716ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.393413  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.68222ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.394748  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:statefulset-controller
I0812 14:38:13.411917  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:ttl-controller: (1.268179ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.413243  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.413267  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.413299  110675 httplog.go:90] GET /healthz: (942.435µs) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:13.434058  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.434094  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.434134  110675 httplog.go:90] GET /healthz: (1.00159ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.434274  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.665308ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.434486  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:ttl-controller
I0812 14:38:13.453006  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:certificate-controller: (2.386204ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.473520  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.582205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.473896  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:certificate-controller
I0812 14:38:13.491850  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pvc-protection-controller: (1.23344ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.513665  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.513698  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.513737  110675 httplog.go:90] GET /healthz: (1.097365ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:13.514313  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.748006ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.514541  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I0812 14:38:13.533720  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pv-protection-controller: (3.10629ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.535143  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.535187  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.535228  110675 httplog.go:90] GET /healthz: (1.878734ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.553291  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.611146ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.553965  110675 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pv-protection-controller
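[editor's note] Each GET ... 404 followed by POST ... 201 pair above is the RBAC bootstrap step reconciling one default binding: look it up, create it if missing. A rough client-go equivalent follows, assuming a recent client-go (the context arguments were added after the release under test); names and wiring are illustrative.

// ensureClusterRoleBinding creates a ClusterRoleBinding if it does not exist,
// mirroring the GET-404-then-POST-201 pattern in the bootstrap log above.
package rbacbootstrap

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func ensureClusterRoleBinding(ctx context.Context, cs kubernetes.Interface, name string) error {
	_, err := cs.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{})
	if err == nil {
		return nil // already present, nothing to do
	}
	if !apierrors.IsNotFound(err) {
		return err
	}
	crb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     name, // assumes binding and role share a name, as the defaults do
		},
	}
	_, err = cs.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{})
	return err
}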
I0812 14:38:13.575766  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/system:controller:bootstrap-signer: (5.160114ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.577703  110675 httplog.go:90] GET /api/v1/namespaces/kube-public: (1.461815ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.593183  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles: (2.572187ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.593454  110675 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0812 14:38:13.613269  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/extension-apiserver-authentication-reader: (2.65225ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.613882  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.613908  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.613968  110675 httplog.go:90] GET /healthz: (2.026733ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:13.615617  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.813688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.634274  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (3.646992ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.634884  110675 storage_rbac.go:254] created role.rbac.authorization.k8s.io/extension-apiserver-authentication-reader in kube-system
I0812 14:38:13.634949  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.634974  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.635006  110675 httplog.go:90] GET /healthz: (1.460703ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.653028  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:bootstrap-signer: (1.89279ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.655046  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.490214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.673351  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.739493ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.673687  110675 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0812 14:38:13.692294  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:cloud-provider: (1.617256ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.694574  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.663727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.713823  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.713869  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.713948  110675 httplog.go:90] GET /healthz: (1.867354ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:13.714916  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (4.229345ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.715191  110675 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0812 14:38:13.732081  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:token-cleaner: (1.422998ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.734853  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.734882  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.734930  110675 httplog.go:90] GET /healthz: (1.565104ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.735458  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.731349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.753432  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.808476ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.754228  110675 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0812 14:38:13.773392  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-controller-manager: (1.274924ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.775387  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.352492ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.793300  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.620622ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.793570  110675 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0812 14:38:13.812210  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-scheduler: (1.432187ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.813225  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.813257  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.813291  110675 httplog.go:90] GET /healthz: (1.126349ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:13.814340  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.508551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.834410  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.834449  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.834487  110675 httplog.go:90] GET /healthz: (1.062975ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.834931  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (4.411476ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.835262  110675 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0812 14:38:13.852820  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::extension-apiserver-authentication-reader: (1.881674ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.855367  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.843035ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.875205  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (4.076612ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.878928  110675 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system::extension-apiserver-authentication-reader in kube-system
I0812 14:38:13.893082  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-controller-manager: (2.132297ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.895807  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.785393ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.913001  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.378196ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.913158  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.913183  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.913221  110675 httplog.go:90] GET /healthz: (1.136113ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:13.913248  110675 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I0812 14:38:13.938978  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:13.939018  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:13.939091  110675 httplog.go:90] GET /healthz: (4.01559ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:13.939332  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-scheduler: (3.667084ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.944273  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (4.227231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.953938  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (3.10249ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.954226  110675 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I0812 14:38:13.973390  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:bootstrap-signer: (2.824665ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.976046  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.895666ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.992788  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.079989ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:13.993013  110675 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I0812 14:38:14.018319  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:cloud-provider: (7.679595ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:14.018487  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:14.018518  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:14.018575  110675 httplog.go:90] GET /healthz: (6.690365ms) 0 [Go-http-client/1.1 127.0.0.1:56040]
I0812 14:38:14.021916  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.803283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:14.033822  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (3.080763ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:14.034018  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:14.034276  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:14.034361  110675 httplog.go:90] GET /healthz: (1.226809ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.034374  110675 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0812 14:38:14.052095  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:token-cleaner: (1.506882ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.054731  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.773268ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.074229  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (3.258695ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.074765  110675 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I0812 14:38:14.093115  110675 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/system:controller:bootstrap-signer: (2.019641ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.096131  110675 httplog.go:90] GET /api/v1/namespaces/kube-public: (2.142228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.113063  110675 healthz.go:169] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I0812 14:38:14.113104  110675 healthz.go:183] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
healthz check failed
I0812 14:38:14.113140  110675 httplog.go:90] GET /healthz: (1.264633ms) 0 [Go-http-client/1.1 127.0.0.1:56260]
I0812 14:38:14.113754  110675 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (3.131258ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.114102  110675 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I0812 14:38:14.137042  110675 httplog.go:90] GET /healthz: (3.647019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.139378  110675 httplog.go:90] GET /api/v1/namespaces/default: (1.410624ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.142441  110675 httplog.go:90] POST /api/v1/namespaces: (2.229506ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.144915  110675 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.792814ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.150165  110675 httplog.go:90] POST /api/v1/namespaces/default/services: (4.516414ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.152189  110675 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.318293ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.154335  110675 httplog.go:90] POST /api/v1/namespaces/default/endpoints: (1.672646ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.213230  110675 httplog.go:90] GET /healthz: (1.196761ms) 200 [Go-http-client/1.1 127.0.0.1:56040]
W0812 14:38:14.214095  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214132  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214154  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214184  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214203  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214212  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214234  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214244  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214253  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214332  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W0812 14:38:14.214352  110675 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
I0812 14:38:14.214375  110675 factory.go:299] Creating scheduler from algorithm provider 'DefaultProvider'
I0812 14:38:14.214386  110675 factory.go:387] Creating scheduler with fit predicates 'map[CheckNodeCondition:{} CheckNodeDiskPressure:{} CheckNodeMemoryPressure:{} CheckNodePIDPressure:{} CheckVolumeBinding:{} GeneralPredicates:{} MatchInterPodAffinity:{} MaxAzureDiskVolumeCount:{} MaxCSIVolumeCountPred:{} MaxEBSVolumeCount:{} MaxGCEPDVolumeCount:{} NoDiskConflict:{} NoVolumeZoneConflict:{} PodToleratesNodeTaints:{}]' and priority functions 'map[BalancedResourceAllocation:{} ImageLocalityPriority:{} InterPodAffinityPriority:{} LeastRequestedPriority:{} NodeAffinityPriority:{} NodePreferAvoidPodsPriority:{} SelectorSpreadPriority:{} TaintTolerationPriority:{}]'
I0812 14:38:14.215103  110675 reflector.go:122] Starting reflector *v1.StatefulSet (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.215134  110675 reflector.go:160] Listing and watching *v1.StatefulSet from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.215296  110675 reflector.go:122] Starting reflector *v1.ReplicationController (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.215342  110675 reflector.go:160] Listing and watching *v1.ReplicationController from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.215434  110675 reflector.go:122] Starting reflector *v1beta1.CSINode (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.215454  110675 reflector.go:160] Listing and watching *v1beta1.CSINode from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.215703  110675 reflector.go:122] Starting reflector *v1.Node (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.215720  110675 reflector.go:160] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.215321  110675 reflector.go:122] Starting reflector *v1beta1.PodDisruptionBudget (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.215778  110675 reflector.go:160] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.216180  110675 reflector.go:122] Starting reflector *v1.StorageClass (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.216196  110675 reflector.go:160] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.216280  110675 httplog.go:90] GET /apis/apps/v1/statefulsets?limit=500&resourceVersion=0: (813.688µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.216615  110675 reflector.go:122] Starting reflector *v1.PersistentVolumeClaim (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.216630  110675 reflector.go:160] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.216823  110675 httplog.go:90] GET /api/v1/replicationcontrollers?limit=500&resourceVersion=0: (695.176µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:14.216962  110675 reflector.go:122] Starting reflector *v1.Service (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.216977  110675 reflector.go:160] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.217269  110675 httplog.go:90] GET /apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: (630.589µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56460]
I0812 14:38:14.217331  110675 httplog.go:90] GET /apis/storage.k8s.io/v1beta1/csinodes?limit=500&resourceVersion=0: (399.228µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.217345  110675 reflector.go:122] Starting reflector *v1.ReplicaSet (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.217360  110675 reflector.go:160] Listing and watching *v1.ReplicaSet from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.217707  110675 reflector.go:122] Starting reflector *v1.Pod (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.217722  110675 reflector.go:160] Listing and watching *v1.Pod from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.217947  110675 httplog.go:90] GET /apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: (370.424µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56462]
I0812 14:38:14.218094  110675 reflector.go:122] Starting reflector *v1.PersistentVolume (1s) from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.218108  110675 reflector.go:160] Listing and watching *v1.PersistentVolume from k8s.io/client-go/informers/factory.go:133
I0812 14:38:14.217360  110675 httplog.go:90] GET /api/v1/nodes?limit=500&resourceVersion=0: (502.034µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56458]
I0812 14:38:14.218827  110675 httplog.go:90] GET /api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: (445.876µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:38:14.218836  110675 get.go:250] Starting watch for /apis/apps/v1/statefulsets, rv=28556 labels= fields= timeout=8m18s
I0812 14:38:14.219259  110675 get.go:250] Starting watch for /apis/storage.k8s.io/v1beta1/csinodes, rv=28555 labels= fields= timeout=6m43s
I0812 14:38:14.219260  110675 get.go:250] Starting watch for /apis/policy/v1beta1/poddisruptionbudgets, rv=28555 labels= fields= timeout=9m15s
I0812 14:38:14.219463  110675 httplog.go:90] GET /api/v1/services?limit=500&resourceVersion=0: (587.823µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56468]
I0812 14:38:14.219501  110675 httplog.go:90] GET /api/v1/persistentvolumes?limit=500&resourceVersion=0: (510.555µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56470]
I0812 14:38:14.220012  110675 get.go:250] Starting watch for /api/v1/replicationcontrollers, rv=28553 labels= fields= timeout=9m56s
I0812 14:38:14.220505  110675 httplog.go:90] GET /apis/apps/v1/replicasets?limit=500&resourceVersion=0: (422.992µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56260]
I0812 14:38:14.220505  110675 get.go:250] Starting watch for /api/v1/persistentvolumeclaims, rv=28553 labels= fields= timeout=7m38s
I0812 14:38:14.220703  110675 get.go:250] Starting watch for /apis/storage.k8s.io/v1/storageclasses, rv=28555 labels= fields= timeout=8m30s
I0812 14:38:14.221231  110675 get.go:250] Starting watch for /api/v1/services, rv=29083 labels= fields= timeout=5m2s
I0812 14:38:14.221267  110675 get.go:250] Starting watch for /apis/apps/v1/replicasets, rv=28556 labels= fields= timeout=7m12s
I0812 14:38:14.221272  110675 get.go:250] Starting watch for /api/v1/persistentvolumes, rv=28553 labels= fields= timeout=7m19s
I0812 14:38:14.221718  110675 httplog.go:90] GET /api/v1/pods?limit=500&resourceVersion=0: (682.779µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56462]
I0812 14:38:14.221805  110675 get.go:250] Starting watch for /api/v1/nodes, rv=28553 labels= fields= timeout=8m45s
I0812 14:38:14.222483  110675 get.go:250] Starting watch for /api/v1/pods, rv=28553 labels= fields= timeout=7m17s
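[editor's note] The reflector lines above come from a SharedInformerFactory started with a 1-second resync period: one reflector per resource type lists and then watches, and the once-per-second "forcing resync" lines that follow are driven by that same period. A compressed client-go equivalent, assuming a clientset "cs" is already available:

// informers_sketch.go: how the informer startup above maps onto client-go.
package informersketch

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func startInformers(cs kubernetes.Interface, stopCh <-chan struct{}) {
	// 1s matches the "(1s)" resync period in the reflector log lines; it is
	// what produces the once-per-second "forcing resync" messages.
	factory := informers.NewSharedInformerFactory(cs, time.Second)

	podInformer := factory.Core().V1().Pods().Informer()
	nodeInformer := factory.Core().V1().Nodes().Informer()

	factory.Start(stopCh) // each informer starts a reflector: list, then watch

	// Block until the initial LIST for each started informer has landed,
	// i.e. the "caches populated" stage in the log.
	cache.WaitForCacheSync(stopCh, podInformer.HasSynced, nodeInformer.HasSynced)
}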
I0812 14:38:14.315011  110675 shared_informer.go:177] caches populated
I0812 14:38:14.415504  110675 shared_informer.go:177] caches populated
I0812 14:38:14.515852  110675 shared_informer.go:177] caches populated
I0812 14:38:14.616101  110675 shared_informer.go:177] caches populated
I0812 14:38:14.716340  110675 shared_informer.go:177] caches populated
I0812 14:38:14.817052  110675 shared_informer.go:177] caches populated
I0812 14:38:14.917246  110675 shared_informer.go:177] caches populated
I0812 14:38:15.017469  110675 shared_informer.go:177] caches populated
I0812 14:38:15.117728  110675 shared_informer.go:177] caches populated
I0812 14:38:15.217950  110675 shared_informer.go:177] caches populated
I0812 14:38:15.218051  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:15.219998  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:15.220068  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:15.220738  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:15.221026  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:15.221655  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:15.222259  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:15.318166  110675 shared_informer.go:177] caches populated
I0812 14:38:15.418437  110675 shared_informer.go:177] caches populated
I0812 14:38:15.421751  110675 httplog.go:90] POST /api/v1/nodes: (2.701642ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:15.422130  110675 node_tree.go:93] Added node "test-node-0" in group "" to NodeTree
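[editor's note] The POST to /api/v1/nodes and the NodeTree line show the harness registering a single fake node, "test-node-0", sized at 500m CPU / 500 memory / 32 pods (the same figures appear again in the bind message further down). A hedged client-go sketch of creating such a node; the capacity values are taken from the log, everything else is illustrative.

// createTestNode registers a minimal Node object like the test harness does.
// Sketch against a recent client-go; capacity mirrors the
// "Capacity: CPU<500m>|Memory<500>|Pods<32>" figures in the bind log line.
package testnode

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func createTestNode(ctx context.Context, cs kubernetes.Interface) (*v1.Node, error) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "test-node-0"},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("500m"),
				v1.ResourceMemory: resource.MustParse("500"),
				v1.ResourcePods:   resource.MustParse("32"),
			},
		},
	}
	return cs.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
}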
I0812 14:38:15.426001  110675 httplog.go:90] POST /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods: (3.391035ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:15.426314  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/waiting-pod
I0812 14:38:15.426332  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/waiting-pod
I0812 14:38:15.426497  110675 scheduler_binder.go:256] AssumePodVolumes for pod "preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/waiting-pod", node "test-node-0"
I0812 14:38:15.426520  110675 scheduler_binder.go:266] AssumePodVolumes for pod "preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/waiting-pod", node "test-node-0": all PVCs bound and nothing to do
I0812 14:38:15.426597  110675 framework.go:558] waiting for 30s for pod "waiting-pod" at permit
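[editor's note] "waiting for 30s for pod "waiting-pod" at permit" is the scheduling framework parking the pod in the permit stage: the test's permit plugin returns a Wait status with a 30-second timeout, and the pod proceeds to bind only when a later step allows or rejects it. Below is a minimal sketch of such a plugin against the present-day framework package; the v1alpha1 package used by the 2019 tree this test ran against had slightly different import paths and signatures, so treat the names as assumptions.

// waitingPermitPlugin parks every pod at the permit stage for up to 30s.
// Sketch against k8s.io/kubernetes/pkg/scheduler/framework as in recent releases.
package permitsketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

type waitingPermitPlugin struct{}

func (pl *waitingPermitPlugin) Name() string { return "waiting-permit" }

// Returning a Wait status makes the framework hold the pod (logged as
// "waiting for 30s for pod ... at permit") until another plugin calls
// Allow/Reject on the waiting pod or the timeout expires.
func (pl *waitingPermitPlugin) Permit(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
	return framework.NewStatus(framework.Wait, ""), 30 * time.Second
}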
I0812 14:38:15.434373  110675 factory.go:622] Attempting to bind signalling-pod to test-node-1
I0812 14:38:15.434809  110675 factory.go:622] Attempting to bind waiting-pod to test-node-0
I0812 14:38:15.435734  110675 scheduler.go:447] Failed to bind pod: permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod
E0812 14:38:15.435766  110675 scheduler.go:449] scheduler cache ForgetPod failed: pod c8be111b-901b-447a-a85a-0c3d536b7e98 wasn't assumed so cannot be forgotten
E0812 14:38:15.435785  110675 scheduler.go:605] error binding pod: Post http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod/binding: dial tcp 127.0.0.1:38585: connect: connection refused
E0812 14:38:15.436017  110675 factory.go:573] Error scheduling permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod: Post http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod/binding: dial tcp 127.0.0.1:38585: connect: connection refused; retrying
I0812 14:38:15.436063  110675 factory.go:631] Updating pod condition for permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod to (PodScheduled==False, Reason=SchedulerError)
E0812 14:38:15.436838  110675 factory.go:606] Error getting pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod for retry: Get http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod: dial tcp 127.0.0.1:38585: connect: connection refused; retrying...
E0812 14:38:15.436997  110675 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:38585/apis/events.k8s.io/v1beta1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/events: dial tcp 127.0.0.1:38585: connect: connection refused' (may retry after sleeping)
E0812 14:38:15.438784  110675 scheduler.go:280] Error updating the condition of the pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod: Put http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod/status: dial tcp 127.0.0.1:38585: connect: connection refused
I0812 14:38:15.438873  110675 httplog.go:90] POST /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/waiting-pod/binding: (3.53845ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:15.439166  110675 scheduler.go:614] pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/waiting-pod is bound successfully on node "test-node-0", 1 nodes evaluated, 1 nodes were found feasible. Bound node resource: "Capacity: CPU<500m>|Memory<500>|Pods<32>|StorageEphemeral<0>; Allocatable: CPU<500m>|Memory<500>|Pods<32>|StorageEphemeral<0>.".
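[editor's note] After the bind above succeeds, an integration test would typically confirm the outcome by polling the pod until .spec.nodeName is set. A small sketch of that wait, assuming a recent client-go (older releases omit the context arguments); interval and timeout are arbitrary illustrative values.

// waitForPodScheduled polls until the pod has been bound to a node, which is
// the condition the log reports as "bound successfully on node ...".
package waitsketch

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForPodScheduled(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return pod.Spec.NodeName != "", nil // bound once nodeName is populated
	})
}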
I0812 14:38:15.443858  110675 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/events: (4.341464ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
E0812 14:38:15.637524  110675 factory.go:606] Error getting pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod for retry: Get http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod: dial tcp 127.0.0.1:38585: connect: connection refused; retrying...
E0812 14:38:16.038156  110675 factory.go:606] Error getting pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod for retry: Get http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod: dial tcp 127.0.0.1:38585: connect: connection refused; retrying...
I0812 14:38:16.218513  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:16.220233  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:16.220378  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:16.220906  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:16.221250  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:16.222284  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:16.222585  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0812 14:38:16.838846  110675 factory.go:606] Error getting pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod for retry: Get http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod: dial tcp 127.0.0.1:38585: connect: connection refused; retrying...
I0812 14:38:17.218715  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:17.220419  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:17.220707  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:17.221124  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:17.221360  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:17.222430  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:17.222787  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:18.218861  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:18.220637  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:18.220837  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:18.221294  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:18.221535  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:18.222614  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:18.222981  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0812 14:38:18.439478  110675 factory.go:606] Error getting pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod for retry: Get http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod: dial tcp 127.0.0.1:38585: connect: connection refused; retrying...
I0812 14:38:19.219601  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:19.220826  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:19.220968  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:19.221444  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:19.221700  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:19.222760  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:19.223154  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:20.220974  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:20.221487  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:20.221562  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:20.221641  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:20.221908  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:20.222925  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:20.223332  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:21.221263  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:21.221770  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:21.221808  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:21.221811  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:21.222034  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:21.223225  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:21.224146  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0812 14:38:21.640131  110675 factory.go:606] Error getting pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod for retry: Get http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod: dial tcp 127.0.0.1:38585: connect: connection refused; retrying...
I0812 14:38:22.221403  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:22.221893  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:22.222082  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:22.222107  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:22.222297  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:22.223415  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:22.224357  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:23.221625  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:23.222038  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:23.222343  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:23.222368  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:23.222442  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:23.223586  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:23.224541  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:24.139829  110675 httplog.go:90] GET /api/v1/namespaces/default: (1.784852ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:24.142046  110675 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.674112ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:24.144057  110675 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.568991ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:24.221742  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:24.222199  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:24.222539  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:24.222670  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:24.222746  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:24.224740  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:24.225954  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:25.221934  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:25.222359  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:25.222715  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:25.222737  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:25.222855  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:25.224950  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:25.226109  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:26.222797  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:26.222980  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:26.222995  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:26.225667  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:26.225725  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:26.225784  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:26.226404  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0812 14:38:26.670664  110675 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:38585/apis/events.k8s.io/v1beta1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/events: dial tcp 127.0.0.1:38585: connect: connection refused' (may retry after sleeping)
I0812 14:38:27.223025  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:27.223199  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:27.223225  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:27.225904  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:27.225952  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:27.226052  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:27.226635  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0812 14:38:28.040791  110675 factory.go:606] Error getting pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod for retry: Get http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod: dial tcp 127.0.0.1:38585: connect: connection refused; retrying...
I0812 14:38:28.223273  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:28.223690  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:28.223733  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:28.226350  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:28.226377  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:28.226480  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:28.226868  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:29.223519  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:29.223991  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:29.224062  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:29.226486  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:29.226597  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:29.226639  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:29.227038  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:30.223752  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:30.224208  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:30.224224  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:30.226743  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:30.226743  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:30.226956  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:30.227178  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:31.223994  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:31.224371  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:31.224404  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:31.226921  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:31.226974  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:31.227110  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:31.227318  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:32.224201  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:32.224447  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:32.224503  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:32.227344  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:32.227394  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:32.227505  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:32.227624  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:33.224395  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:33.224628  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:33.224660  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:33.227603  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:33.227616  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:33.227768  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:33.227795  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:34.146586  110675 httplog.go:90] GET /api/v1/namespaces/default: (8.33744ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:34.149293  110675 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (2.028695ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:34.151842  110675 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.898972ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:34.224642  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:34.224813  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:34.224825  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:34.227716  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:34.227831  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:34.227998  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:34.228109  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:35.224875  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:35.225009  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:35.225079  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:35.227968  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:35.227970  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:35.228188  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:35.228213  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:36.225105  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:36.225110  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:36.225221  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:36.228119  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:36.228129  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:36.228378  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:36.228431  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:37.225321  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:37.225487  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:37.225533  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:37.228309  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:37.228349  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:37.228609  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:37.228627  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0812 14:38:37.896173  110675 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:38585/apis/events.k8s.io/v1beta1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/events: dial tcp 127.0.0.1:38585: connect: connection refused' (may retry after sleeping)
I0812 14:38:38.225632  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:38.225785  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:38.225952  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:38.228476  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:38.228526  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:38.228806  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:38.228826  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:39.225865  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:39.225997  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:39.226074  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:39.228681  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:39.228681  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:39.228938  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:39.228941  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:40.226071  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:40.226151  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:40.226184  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:40.228947  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:40.228999  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:40.229095  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:40.229191  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
E0812 14:38:40.841499  110675 factory.go:606] Error getting pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod for retry: Get http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod: dial tcp 127.0.0.1:38585: connect: connection refused; retrying...
I0812 14:38:41.226263  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:41.226319  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:41.226349  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:41.229132  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:41.229149  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:41.229362  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:41.229379  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:42.226481  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:42.226518  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:42.226494  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:42.229298  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:42.229399  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:42.229507  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:42.229613  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:43.226673  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:43.226681  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:43.226683  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:43.229531  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:43.229531  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:43.229932  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:43.229961  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:44.140193  110675 httplog.go:90] GET /api/v1/namespaces/default: (1.824936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:44.142347  110675 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.575662ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:44.144309  110675 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.398634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:44.226888  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:44.226935  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:44.226950  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:44.229737  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:44.229748  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:44.230098  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:44.230100  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:45.227135  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:45.227133  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:45.227133  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:45.229973  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:45.229974  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:45.230260  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:45.230277  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:45.429762  110675 httplog.go:90] POST /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods: (2.965471ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:45.430119  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:45.430145  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:45.430326  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:45.430373  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:45.432220  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.510616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:45.433964  110675 httplog.go:90] PUT /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod/status: (3.32956ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:45.435042  110675 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/events: (3.760003ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:45.435836  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.224689ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56482]
I0812 14:38:45.436127  110675 generic_scheduler.go:1193] Node test-node-0 is a potential node for preemption.
I0812 14:38:45.438754  110675 httplog.go:90] PUT /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod/status: (2.226459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:45.441757  110675 httplog.go:90] DELETE /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/waiting-pod: (2.531825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:45.444623  110675 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/events: (1.812117ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:45.532734  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.968631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:45.633179  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.530305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:45.733688  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.856019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:45.833492  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.048789ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:45.933471  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.674865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:46.032945  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.248933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:46.132785  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.169186ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:46.227403  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:46.227512  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:46.227403  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:46.230165  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:46.230214  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:46.230396  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:46.230426  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:46.230901  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:46.230919  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:46.231459  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:46.231704  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:46.234287  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.230518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:46.234981  110675 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/events: (2.37301ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:46.235601  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.917069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60294]
I0812 14:38:46.235611  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.91805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60306]
I0812 14:38:46.332804  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.193905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:46.432881  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.171113ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:46.533083  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.332756ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:46.632742  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.061164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:46.732918  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.291699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:46.832752  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.133505ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:46.932703  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.095108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.035077  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.420835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.151427  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.438527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.220776  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:47.220824  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:47.220987  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:47.221056  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:47.227635  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:47.227675  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:47.227931  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:47.228282  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (6.04741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.228749  110675 httplog.go:90] PATCH /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/events/preemptor-pod.15ba33c168076d8b: (6.009761ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60320]
I0812 14:38:47.229048  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (7.259566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:47.230335  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:47.230368  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:47.230747  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:47.230857  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:47.230870  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:47.230993  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:47.231026  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:47.231158  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:47.232216  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.751529ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:47.233431  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.729045ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60322]
I0812 14:38:47.233720  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.513171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.332863  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.238605ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.432913  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.258568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.532717  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.049815ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.632728  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.031796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.733362  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.587533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.832719  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.134743ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:47.933077  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.021688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.033453  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.829828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.133123  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.190176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.227816  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:48.228000  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:48.228128  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:48.230518  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:48.230626  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:48.230945  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:48.231122  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:48.231141  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:48.231285  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:48.231337  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:48.231587  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:48.234688  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.823683ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.234701  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.848231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:48.234688  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.429847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60354]
I0812 14:38:48.333276  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.625562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.432703  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.07913ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.532661  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.976771ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.632889  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.211681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.733237  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.694045ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.832773  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.067953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:48.932720  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.993318ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:49.032666  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.043069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:49.133100  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.429042ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:49.228049  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:49.228186  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:49.228364  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:49.231126  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:49.231246  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:49.231258  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:49.231294  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:49.231324  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:49.231442  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:49.231481  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:49.232617  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:49.233227  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.536563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:49.233481  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.359226ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:49.233801  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.588802ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60364]
I0812 14:38:49.332442  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.828843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:49.432628  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.914236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:49.536167  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (5.489518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:49.632249  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.58894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:49.733067  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.372178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:49.833100  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.367379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
E0812 14:38:49.923968  110675 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:38585/apis/events.k8s.io/v1beta1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/events: dial tcp 127.0.0.1:38585: connect: connection refused' (may retry after sleeping)
I0812 14:38:49.932821  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.142181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:50.032932  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.300818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:50.143639  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (6.308656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:50.228263  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:50.228332  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:50.228528  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:50.231479  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:50.231660  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:50.232707  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.187562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:50.232828  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:50.232981  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:50.233137  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:50.233169  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:50.233351  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:50.233424  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:50.236052  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.18759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:50.236052  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.113749ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:50.332839  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.146661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:50.432684  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.043343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:50.532845  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.170345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:50.632910  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.278228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:50.733055  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.438034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:50.832740  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.088671ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:50.932754  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.07883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.032685  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.026044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.133451  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.882419ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.228468  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:51.228468  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:51.228966  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:51.231839  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:51.231879  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:51.233036  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.387126ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.233377  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:51.233408  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:51.233564  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:51.233580  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:51.233724  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:51.233765  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:51.237267  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.175597ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.237707  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.257237ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:51.333035  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.347144ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.432672  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.981437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.532712  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.014723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.632614  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.974335ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.732856  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.198752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.832993  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.319039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:51.932295  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.583233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:52.034750  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.15359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:52.132271  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.730938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:52.228715  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:52.228767  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:52.229139  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:52.232369  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:52.232378  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:52.232517  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.880186ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:52.233583  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:52.233599  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:52.233720  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:52.233743  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:52.233933  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:52.233985  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:52.236099  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.865579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:52.236099  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.726625ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:52.332589  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.97521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:52.437230  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (6.510544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:52.532753  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.144388ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:52.632594  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.931071ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:52.732374  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.751114ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:52.832315  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.761292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:52.932520  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.809695ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.032297  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.73253ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.135158  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.559688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.228916  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:53.229061  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:53.229385  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:53.232595  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:53.233808  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:53.234026  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:53.234047  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:53.234206  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:53.234258  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:53.235742  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:53.236113  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:53.238168  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.311118ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.238172  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.713486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:53.238509  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.069791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60308]
I0812 14:38:53.332655  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.046234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.432724  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.171632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.532110  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.51056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.636376  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (5.809583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.732514  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.985839ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.833191  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.517455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:53.934345  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.137519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.032798  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.238656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.132333  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.699367ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.140425  110675 httplog.go:90] GET /api/v1/namespaces/default: (1.921602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.142742  110675 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.759516ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.144601  110675 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.392834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.229118  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:54.229563  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:54.229565  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:54.232066  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.590458ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.232743  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:54.234003  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:54.234128  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:54.234139  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:54.234259  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:54.234300  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
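The "no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory" message means the single test node is filtered out because the preemptor pod requests more CPU and memory than the node has free, so the pod is marked Unschedulable and requeued until preemption frees capacity. A self-contained sketch of that kind of fit check, with made-up capacity and request numbers purely for illustration (not the scheduler's actual predicate code):

package main

import "fmt"

type resources struct {
	milliCPU int64 // CPU in millicores
	memory   int64 // memory in bytes
}

// insufficient returns the reasons a pod's request does not fit within a node's
// remaining free capacity, in the spirit of the log's "Insufficient cpu/memory".
func insufficient(free, request resources) []string {
	var reasons []string
	if request.milliCPU > free.milliCPU {
		reasons = append(reasons, "Insufficient cpu")
	}
	if request.memory > free.memory {
		reasons = append(reasons, "Insufficient memory")
	}
	return reasons
}

func main() {
	free := resources{milliCPU: 500, memory: 500 << 20}   // hypothetical leftover node capacity
	request := resources{milliCPU: 1000, memory: 1 << 30} // hypothetical preemptor request
	fmt.Println(insufficient(free, request))              // [Insufficient cpu Insufficient memory]
}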
I0812 14:38:54.235919  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:54.236307  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:54.236611  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.855227ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:54.237855  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.711642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.332238  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.603767ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.432357  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.833899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.532570  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.722347ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.632679  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.0819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.732301  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.735682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.832459  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.777599ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:54.932626  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.031508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:55.032654  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.12167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:55.132761  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.272154ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:55.229415  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:55.230115  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:55.230116  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:55.232301  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.765282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:55.232927  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:55.234188  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:55.234396  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:55.234419  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:55.234578  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:55.234635  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:55.236096  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:55.236469  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:55.237061  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.602334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:55.237363  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.394455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:55.332412  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.753082ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:55.432197  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.586447ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:55.532327  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.756568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:55.632311  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.760174ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:55.732303  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.664455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:55.833728  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.462544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:55.933177  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.514957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.032073  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.51698ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.133427  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.233206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.229653  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:56.230263  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:56.230335  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:56.232283  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.551342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.233139  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:56.234420  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:56.234606  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:56.234628  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:56.234812  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:56.234865  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:56.236286  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:56.236620  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:56.236762  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.571917ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:56.236852  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.749195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.333358  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.356685ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.433645  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.091484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.532171  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.556746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.632500  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.894886ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.732527  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.934351ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.832819  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.112282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:56.932695  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.990211ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.032798  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.190691ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.133001  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.018671ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.229823  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:57.231117  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:57.231174  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:57.232453  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.974955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.233322  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:57.234632  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:57.234755  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:57.234777  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:57.234891  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:57.234923  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:57.236458  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:57.236791  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:57.237472  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.046417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:57.237473  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.018987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.334224  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.585287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.432681  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.95552ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.532799  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.105588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.632679  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.048059ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.733654  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.947902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.832647  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.025597ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:57.932864  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.123526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:58.032893  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.165324ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:58.132690  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.044629ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:58.230020  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:58.231291  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:58.231320  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:58.232348  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.750703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:58.233493  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:58.234919  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:58.235063  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:58.235078  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:58.235255  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:58.235348  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:58.236656  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:58.236927  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:58.237278  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.6508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:58.237349  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.589616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:58.332662  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.892586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:58.432654  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.991467ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:58.532870  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.769019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:58.632708  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.079671ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:58.732939  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.331449ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:58.832934  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.287986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:58.933638  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.423011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.032955  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.395093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.132686  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.090944ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.230220  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:59.231486  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:59.231599  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:59.233237  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.435438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.233637  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:59.235092  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:59.235325  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:59.235341  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:38:59.235517  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:38:59.235586  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:38:59.236810  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:59.237056  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:38:59.238117  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.051302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:38:59.238121  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.20151ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.332580  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.941277ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.432532  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.886201ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.532514  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.873494ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.632704  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.172941ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.732908  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.225695ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.832859  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.248023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:38:59.932919  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.141313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:39:00.032810  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.157524ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:39:00.132702  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.045782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:39:00.230507  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:00.231636  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:00.231753  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:00.232712  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.079501ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:39:00.233841  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:00.235353  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:00.235513  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:00.235535  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:00.235720  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:00.235775  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:00.236985  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:00.237226  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:00.238259  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.030093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:39:00.238319  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.073942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:00.332778  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.227104ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:00.433189  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.86427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:00.532656  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.135616ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:00.632781  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.165645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:00.732854  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.218471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:00.832807  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.194486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:00.932770  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.091898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.033118  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.360048ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.132723  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.069209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.230716  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:01.231844  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:01.232508  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:01.232901  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.25846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.234007  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:01.235587  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:01.235758  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:01.235786  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:01.235938  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:01.236158  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:01.237217  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:01.237448  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:01.238467  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.798824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:39:01.238983  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.046103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.332774  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.008555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.432854  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.1567ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.532957  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.312349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.633766  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.197433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
E0812 14:39:01.684292  110675 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:38585/apis/events.k8s.io/v1beta1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/events: dial tcp 127.0.0.1:38585: connect: connection refused' (may retry after sleeping)
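This event-write failure targets 127.0.0.1:38585 and the permit-plugin5846294a namespace, which appear to belong to an earlier permit-plugin test whose API server is no longer listening, hence "connection refused"; the broadcaster only notes it "may retry after sleeping". A generic sketch of that retry-after-sleeping pattern (assumption: this is not the event broadcaster's actual implementation, just the general shape of bounded retries with a sleep between attempts):

package main

import (
	"errors"
	"fmt"
	"time"
)

// writeWithRetry retries a failing write a bounded number of times, sleeping
// between attempts, and returns the last error if all attempts fail.
func writeWithRetry(write func() error, attempts int, sleep time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = write(); err == nil {
			return nil
		}
		time.Sleep(sleep) // back off before the next attempt
	}
	return fmt.Errorf("giving up after %d attempts: %v", attempts, err)
}

func main() {
	err := writeWithRetry(func() error {
		return errors.New("dial tcp 127.0.0.1:38585: connect: connection refused")
	}, 3, 100*time.Millisecond)
	fmt.Println(err)
}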
I0812 14:39:01.732951  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.264737ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.834682  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.000047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:01.933015  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.450899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.032868  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.191519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.132157  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.593713ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.230887  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:02.231981  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:02.232084  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:02.232095  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:02.232256  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:02.232313  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:02.232838  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:02.233703  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.005785ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.234174  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:02.235341  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.344429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:39:02.235769  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:02.235897  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:02.235911  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:02.236048  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:02.236094  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:02.237382  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:02.237612  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:02.237831  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.791818ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:02.239233  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.771442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
E0812 14:39:02.239882  110675 factory.go:597] pod is already present in the backoffQ
I0812 14:39:02.240167  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.56506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56476]
I0812 14:39:02.333141  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.50111ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.432690  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.025871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.532746  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.110626ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.633255  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.582228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.732684  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.020009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.832818  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.114259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:02.932740  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.1774ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.032327  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.822812ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.132461  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.83513ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.231215  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:03.232596  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.937128ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.232920  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:03.232993  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:03.234359  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:03.235951  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:03.236090  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:03.236109  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:03.236327  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:03.236386  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:03.237643  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:03.238287  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:03.238569  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.832101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.238577  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.833228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:03.332904  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.274653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.432676  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.031539ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.532813  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.12288ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.632821  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.161273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.733165  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.430603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.832742  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.05912ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:03.932582  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.99175ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.033069  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.405969ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.132914  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.403503ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.141152  110675 httplog.go:90] GET /api/v1/namespaces/default: (2.691601ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.143142  110675 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.506064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.145033  110675 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.335516ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.231473  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:04.232614  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.973609ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.233062  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:04.233167  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:04.234590  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:04.236156  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:04.236300  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:04.236336  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:04.236481  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:04.236532  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:04.237785  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:04.238438  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:04.238737  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.806348ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:04.239367  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.426731ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.332675  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.045994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.440309  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (9.574301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.532602  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.926125ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.632509  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.893371ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.733005  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.221041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.832881  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.19247ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:04.933063  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.381471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:05.032747  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.076186ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:05.134893  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.333485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:05.232871  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.223374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:05.233040  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:05.233327  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:05.233342  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:05.234825  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:05.236363  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:05.236521  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:05.236578  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:05.236740  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:05.236793  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:05.237950  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:05.238617  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:05.239310  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.938768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:05.239327  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.947093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:05.332997  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.311221ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:05.432880  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.240586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:05.533165  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.511583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:05.633053  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.955374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:05.732412  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.81777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:05.832588  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.941465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:05.932598  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.936922ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:06.032807  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.193103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:06.132869  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.210691ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:06.233044  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.148537ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:06.233443  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:06.234956  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:06.235469  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:06.235502  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:06.236566  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:06.236736  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:06.236756  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:06.236905  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:06.236953  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:06.238157  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:06.239145  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.860193ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:06.239145  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.742472ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:06.239571  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:06.332944  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.214935ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:06.432941  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.372745ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
E0812 14:39:06.442288  110675 factory.go:606] Error getting pod permit-plugin5846294a-933c-4944-8c99-dbb789283b23/signalling-pod for retry: Get http://127.0.0.1:38585/api/v1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/pods/signalling-pod: dial tcp 127.0.0.1:38585: connect: connection refused; retrying...
I0812 14:39:06.532818  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.159662ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:06.632700  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.00558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:06.733198  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.360439ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:06.832599  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.903486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:06.932607  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.081056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.032684  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.056997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.132995  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.353581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.232819  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.173156ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.233642  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:07.235196  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:07.235806  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:07.235970  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:07.236837  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:07.236979  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:07.236999  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:07.237164  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:07.237209  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:07.239497  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.987511ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.239864  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.219833ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:07.240130  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:07.240200  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:07.332688  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.077376ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.432733  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.049341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.544753  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (10.694231ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.632794  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.963963ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.733228  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.536039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.833139  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.278408ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:07.933254  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.608014ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:08.035007  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.36246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:08.132889  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.140694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:08.232782  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.08097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:08.233876  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:08.235364  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:08.236011  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:08.236174  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:08.237035  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:08.237188  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:08.237208  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:08.237401  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:08.237464  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:08.239859  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.063266ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:08.239899  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.932874ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:08.240250  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:08.240311  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:08.333159  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.472116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:08.435413  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.871023ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:08.532500  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.819208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:08.633302  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.690843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:08.732702  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.101538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:08.832443  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.778228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:08.932668  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.134342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:09.032591  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.990123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:09.132829  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.209126ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:09.233807  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.124886ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:09.234137  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:09.235618  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:09.236197  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:09.236341  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:09.237196  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:09.237298  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:09.237309  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:09.237413  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:09.237445  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:09.239508  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.739163ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:09.239634  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.74494ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:09.240405  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:09.240431  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:09.333012  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.294267ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:09.432851  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.114078ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:09.533460  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.750862ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:09.633341  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.291494ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:09.732980  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.42668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:09.834407  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.762618ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:09.937596  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.915991ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:10.032562  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.981198ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:10.132193  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.683328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:10.232516  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.956759ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:10.234349  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:10.235830  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:10.236396  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:10.236499  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:10.237387  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:10.237516  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:10.237536  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:10.237766  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:10.237825  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:10.239728  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.599745ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:10.240540  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:10.240591  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:10.241148  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.963634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.332362  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.834355ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.432853  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.203311ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.532616  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.036147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.632920  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.246816ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.732587  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.799569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.832888  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.134645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.856424  110675 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.644164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.858353  110675 httplog.go:90] GET /api/v1/namespaces/kube-public: (1.495648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.859963  110675 httplog.go:90] GET /api/v1/namespaces/kube-node-lease: (1.241373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:10.932175  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.546633ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:11.032312  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.825471ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:11.136026  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.533584ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:11.232525  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.956797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:11.234501  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:11.235981  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:11.236612  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:11.236616  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:11.237595  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:11.237715  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:11.237727  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:11.237890  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:11.237938  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:11.240465  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.073542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:11.240465  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.070856ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:11.240795  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:11.240821  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:11.332756  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.1025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:11.433460  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.808302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:11.532311  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.690838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:11.632234  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.5206ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:11.732681  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.128397ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:11.832421  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.83171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
E0812 14:39:11.849819  110675 event_broadcaster.go:242] Unable to write event: 'Post http://127.0.0.1:38585/apis/events.k8s.io/v1beta1/namespaces/permit-plugin5846294a-933c-4944-8c99-dbb789283b23/events: dial tcp 127.0.0.1:38585: connect: connection refused' (may retry after sleeping)
I0812 14:39:11.934309  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.15224ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:12.032477  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.829154ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:12.135102  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.535184ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:12.232298  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.632757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:12.234666  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:12.236164  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:12.236743  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:12.236913  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:12.237705  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:12.237874  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:12.237892  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:12.238050  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:12.238107  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:12.240219  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.558902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:12.240226  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.726936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:12.240938  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:12.240976  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:12.332297  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.799264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:12.432293  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.743533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:12.532983  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.312645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:12.632699  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.022068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:12.736760  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (6.147918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:12.833063  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.329053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:12.933220  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.316607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.032847  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.25943ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.133326  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.716764ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.232793  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.158158ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.234929  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:13.236370  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:13.236988  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:13.237233  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:13.237869  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:13.237999  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:13.238012  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:13.238198  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:13.238244  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:13.240706  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.943676ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:13.240707  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.944344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.241079  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:13.241094  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:13.332689  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.149784ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.432752  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.13078ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.532765  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.215769ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.632430  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.783948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.732830  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.196027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.833133  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.591506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:13.935531  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.492593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:14.032934  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.227882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:14.136161  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (4.439485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:14.140005  110675 httplog.go:90] GET /api/v1/namespaces/default: (1.296894ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:14.141675  110675 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.32003ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:14.143315  110675 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.194526ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:14.232527  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.966322ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:14.235095  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:14.236572  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:14.237157  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:14.237839  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:14.238012  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:14.238142  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:14.238155  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:14.238295  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:14.238344  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:14.240246  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.594099ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:14.240261  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.205488ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:14.241216  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:14.241258  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:14.332471  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.832313ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:14.432157  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.631415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:14.532496  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.859092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:14.633436  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.907874ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:14.732581  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.995403ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:14.832485  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.858177ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:14.932858  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.252883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.032686  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.102275ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.133874  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.946353ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.232699  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.052773ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.235265  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:15.236919  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:15.237346  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:15.238035  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:15.238209  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:15.238907  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:15.238930  110675 scheduler.go:477] Attempting to schedule pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:15.239093  110675 factory.go:557] Unable to schedule preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I0812 14:39:15.239139  110675 factory.go:631] Updating pod condition for preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I0812 14:39:15.241376  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.798468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:15.241460  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.83945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.241585  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:15.241646  110675 reflector.go:243] k8s.io/client-go/informers/factory.go:133: forcing resync
I0812 14:39:15.333270  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (2.45909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.433675  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (3.067269ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.435573  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.343825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.440829  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/waiting-pod: (4.486453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.458883  110675 httplog.go:90] DELETE /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/waiting-pod: (17.554315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.475717  110675 scheduling_queue.go:830] About to try and schedule pod preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:15.475757  110675 scheduler.go:473] Skip schedule deleting pod: preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/preemptor-pod
I0812 14:39:15.479087  110675 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/events: (2.858515ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:34004]
I0812 14:39:15.479624  110675 httplog.go:90] DELETE /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (20.25915ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.484708  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/waiting-pod: (2.945744ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.487931  110675 httplog.go:90] GET /api/v1/namespaces/preempt-with-permit-plugind30418c4-0ee5-4706-bcbe-e8b15c1f2cf1/pods/preemptor-pod: (1.166348ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
E0812 14:39:15.489173  110675 scheduling_queue.go:833] Error while retrieving next pod from scheduling queue: scheduling queue is closed
I0812 14:39:15.489501  110675 httplog.go:90] GET /apis/storage.k8s.io/v1beta1/csinodes?allowWatchBookmarks=true&resourceVersion=28555&timeout=6m43s&timeoutSeconds=403&watch=true: (1m1.270479905s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56464]
I0812 14:39:15.489511  110675 httplog.go:90] GET /apis/policy/v1beta1/poddisruptionbudgets?allowWatchBookmarks=true&resourceVersion=28555&timeout=9m15s&timeoutSeconds=555&watch=true: (1m1.270498747s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56466]
I0812 14:39:15.489595  110675 httplog.go:90] GET /api/v1/persistentvolumes?allowWatchBookmarks=true&resourceVersion=28553&timeout=7m19s&timeoutSeconds=439&watch=true: (1m1.268663226s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56478]
I0812 14:39:15.489687  110675 httplog.go:90] GET /apis/apps/v1/statefulsets?allowWatchBookmarks=true&resourceVersion=28556&timeout=8m18s&timeoutSeconds=498&watch=true: (1m1.27128797s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56460]
I0812 14:39:15.489702  110675 httplog.go:90] GET /apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=28555&timeout=8m30s&timeoutSeconds=510&watch=true: (1m1.269523258s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56474]
I0812 14:39:15.489713  110675 httplog.go:90] GET /api/v1/persistentvolumeclaims?allowWatchBookmarks=true&resourceVersion=28553&timeout=7m38s&timeoutSeconds=458&watch=true: (1m1.269601452s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56458]
I0812 14:39:15.489772  110675 httplog.go:90] GET /api/v1/nodes?allowWatchBookmarks=true&resourceVersion=28553&timeout=8m45s&timeoutSeconds=525&watch=true: (1m1.268404325s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56480]
I0812 14:39:15.489807  110675 httplog.go:90] GET /apis/apps/v1/replicasets?allowWatchBookmarks=true&resourceVersion=28556&timeout=7m12s&timeoutSeconds=432&watch=true: (1m1.268805529s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56040]
I0812 14:39:15.489820  110675 httplog.go:90] GET /api/v1/replicationcontrollers?allowWatchBookmarks=true&resourceVersion=28553&timeout=9m56s&timeoutSeconds=596&watch=true: (1m1.270191386s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56472]
I0812 14:39:15.489918  110675 httplog.go:90] GET /api/v1/pods?allowWatchBookmarks=true&resourceVersion=28553&timeout=7m17s&timeoutSeconds=437&watch=true: (1m1.267764635s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56462]
I0812 14:39:15.489929  110675 httplog.go:90] GET /api/v1/services?allowWatchBookmarks=true&resourceVersion=29083&timeout=5m2s&timeoutSeconds=302&watch=true: (1m1.268997258s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:56468]
I0812 14:39:15.497216  110675 httplog.go:90] DELETE /api/v1/nodes: (7.385572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.497478  110675 controller.go:176] Shutting down kubernetes service endpoint reconciler
I0812 14:39:15.499679  110675 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.878954ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
I0812 14:39:15.502911  110675 httplog.go:90] PUT /api/v1/namespaces/default/endpoints/kubernetes: (2.75606ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:60816]
--- FAIL: TestPreemptWithPermitPlugin (64.90s)
    framework_test.go:1618: Expected the preemptor pod to be scheduled. error: timed out waiting for the condition
    framework_test.go:1622: Expected the waiting pod to get preempted and deleted

				from junit_eb089aee80105aff5db0557ae4449d31f19359f2_20190812-143101.xml
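
The two framework_test.go assertions above report that a polling wait on the preemptor pod never saw it scheduled before the timeout; "timed out waiting for the condition" is the standard wait.ErrWaitTimeout message from k8s.io/apimachinery/pkg/util/wait. The sketch below is only an illustration of that pattern, not the test's actual helper: the function name, poll interval, and timeout are invented here, and it assumes the context-free client-go Get signature in use at the time of this run.

    package example

    import (
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
    )

    // waitForPodScheduled (hypothetical name) polls the API server until the pod
    // has been bound to a node. If that never happens within the timeout,
    // wait.PollImmediate returns wait.ErrWaitTimeout, whose message is exactly
    // "timed out waiting for the condition".
    func waitForPodScheduled(cs kubernetes.Interface, namespace, name string, timeout time.Duration) error {
        return wait.PollImmediate(100*time.Millisecond, timeout, func() (bool, error) {
            pod, err := cs.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            // Scheduled means the scheduler has bound the pod to a node.
            return pod.Spec.NodeName != "", nil
        })
    }

In this run the condition never became true: every scheduling attempt in the log above ended with "no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory", so the wait exhausted its timeout and the follow-up check that the waiting pod had been preempted and deleted failed as well.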


Error lines from build-log.txt

... skipping 787 lines ...
W0812 14:25:42.982] I0812 14:25:42.976699   53136 taint_manager.go:162] Sending events to api server.
W0812 14:25:42.982] I0812 14:25:42.977049   53136 node_lifecycle_controller.go:418] Controller will reconcile labels.
W0812 14:25:42.982] I0812 14:25:42.977156   53136 node_lifecycle_controller.go:431] Controller will taint node by condition.
W0812 14:25:42.982] I0812 14:25:42.977212   53136 controllermanager.go:535] Started "nodelifecycle"
W0812 14:25:42.982] I0812 14:25:42.977324   53136 node_lifecycle_controller.go:455] Starting node controller
W0812 14:25:42.983] I0812 14:25:42.977391   53136 controller_utils.go:1029] Waiting for caches to sync for taint controller
W0812 14:25:42.983] E0812 14:25:42.978327   53136 core.go:78] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail
W0812 14:25:42.983] W0812 14:25:42.978391   53136 controllermanager.go:527] Skipping "service"
W0812 14:25:42.983] I0812 14:25:42.979501   53136 controllermanager.go:535] Started "statefulset"
W0812 14:25:42.984] I0812 14:25:42.979573   53136 stateful_set.go:145] Starting stateful set controller
W0812 14:25:42.984] I0812 14:25:42.979595   53136 controller_utils.go:1029] Waiting for caches to sync for stateful set controller
W0812 14:25:42.984] I0812 14:25:42.980044   53136 node_lifecycle_controller.go:77] Sending events to api server
W0812 14:25:42.984] E0812 14:25:42.980149   53136 core.go:175] failed to start cloud node lifecycle controller: no cloud provider provided
W0812 14:25:42.984] W0812 14:25:42.980161   53136 controllermanager.go:527] Skipping "cloud-node-lifecycle"
W0812 14:25:42.985] I0812 14:25:42.980833   53136 controllermanager.go:535] Started "pv-protection"
W0812 14:25:42.985] I0812 14:25:42.980894   53136 pv_protection_controller.go:82] Starting PV protection controller
W0812 14:25:42.985] I0812 14:25:42.980909   53136 controller_utils.go:1029] Waiting for caches to sync for PV protection controller
W0812 14:25:43.016] W0812 14:25:43.015532   53136 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="127.0.0.1" does not exist
W0812 14:25:43.018] The Service "kubernetes" is invalid: spec.clusterIP: Invalid value: "10.0.0.1": provided IP is already allocated
W0812 14:25:43.067] I0812 14:25:43.067075   53136 controller_utils.go:1036] Caches are synced for deployment controller
W0812 14:25:43.068] I0812 14:25:43.068325   53136 controller_utils.go:1036] Caches are synced for persistent volume controller
W0812 14:25:43.074] I0812 14:25:43.073990   53136 controller_utils.go:1036] Caches are synced for ClusterRoleAggregator controller
W0812 14:25:43.074] I0812 14:25:43.074523   53136 controller_utils.go:1036] Caches are synced for ReplicationController controller
W0812 14:25:43.077] I0812 14:25:43.077533   53136 controller_utils.go:1036] Caches are synced for taint controller
W0812 14:25:43.078] I0812 14:25:43.078021   53136 node_lifecycle_controller.go:1189] Initializing eviction metric for zone: 
W0812 14:25:43.078] I0812 14:25:43.078353   53136 node_lifecycle_controller.go:1039] Controller detected that all Nodes are not-Ready. Entering master disruption mode.
W0812 14:25:43.079] I0812 14:25:43.078693   53136 taint_manager.go:186] Starting NoExecuteTaintManager
W0812 14:25:43.079] I0812 14:25:43.078895   53136 event.go:255] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"127.0.0.1", UID:"b3313bfd-2014-4197-9067-b4fc387a1fe1", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node 127.0.0.1 event: Registered Node 127.0.0.1 in Controller
W0812 14:25:43.081] I0812 14:25:43.081205   53136 controller_utils.go:1036] Caches are synced for PV protection controller
W0812 14:25:43.096] I0812 14:25:43.095691   53136 controller_utils.go:1036] Caches are synced for namespace controller
W0812 14:25:43.097] I0812 14:25:43.096683   53136 controller_utils.go:1036] Caches are synced for TTL controller
W0812 14:25:43.104] E0812 14:25:43.103882   53136 clusterroleaggregation_controller.go:180] view failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "view": the object has been modified; please apply your changes to the latest version and try again
W0812 14:25:43.107] I0812 14:25:43.107050   53136 controller_utils.go:1036] Caches are synced for expand controller
W0812 14:25:43.108] I0812 14:25:43.108202   53136 controller_utils.go:1036] Caches are synced for PVC protection controller
W0812 14:25:43.110] I0812 14:25:43.110351   53136 controller_utils.go:1036] Caches are synced for HPA controller
W0812 14:25:43.111] I0812 14:25:43.110730   53136 controller_utils.go:1036] Caches are synced for ReplicaSet controller
W0812 14:25:43.111] I0812 14:25:43.111119   53136 controller_utils.go:1036] Caches are synced for disruption controller
W0812 14:25:43.111] I0812 14:25:43.111194   53136 disruption.go:341] Sending events to api server.
... skipping 93 lines ...
I0812 14:25:47.073] +++ working dir: /go/src/k8s.io/kubernetes
I0812 14:25:47.077] +++ command: run_RESTMapper_evaluation_tests
I0812 14:25:47.091] +++ [0812 14:25:47] Creating namespace namespace-1565619947-8792
I0812 14:25:47.173] namespace/namespace-1565619947-8792 created
I0812 14:25:47.254] Context "test" modified.
I0812 14:25:47.263] +++ [0812 14:25:47] Testing RESTMapper
I0812 14:25:47.390] +++ [0812 14:25:47] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype"
I0812 14:25:47.409] +++ exit code: 0
I0812 14:25:47.557] NAME                              SHORTNAMES   APIGROUP                       NAMESPACED   KIND
I0812 14:25:47.557] bindings                                                                      true         Binding
I0812 14:25:47.557] componentstatuses                 cs                                          false        ComponentStatus
I0812 14:25:47.558] configmaps                        cm                                          true         ConfigMap
I0812 14:25:47.558] endpoints                         ep                                          true         Endpoints
... skipping 643 lines ...
I0812 14:26:06.912] core.sh:186: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:26:07.092] (Bcore.sh:190: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:26:07.194] (Bcore.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:26:07.370] (Bcore.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:26:07.471] (Bcore.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:26:07.562] (Bpod "valid-pod" force deleted
W0812 14:26:07.663] error: resource(s) were provided, but no name, label selector, or --all flag specified
W0812 14:26:07.663] error: setting 'all' parameter but found a non empty selector. 
W0812 14:26:07.664] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0812 14:26:07.764] core.sh:206: Successful get pods -l'name in (valid-pod)' {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:26:07.769] (Bcore.sh:211: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:: :
I0812 14:26:07.851] (Bnamespace/test-kubectl-describe-pod created
I0812 14:26:07.957] core.sh:215: Successful get namespaces/test-kubectl-describe-pod {{.metadata.name}}: test-kubectl-describe-pod
I0812 14:26:08.053] (Bcore.sh:219: Successful get secrets --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: 
... skipping 11 lines ...
I0812 14:26:09.104] (Bpoddisruptionbudget.policy/test-pdb-3 created
I0812 14:26:09.206] core.sh:251: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2
I0812 14:26:09.289] (Bpoddisruptionbudget.policy/test-pdb-4 created
I0812 14:26:09.394] core.sh:255: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50%
I0812 14:26:09.569] (Bcore.sh:261: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:26:09.768] (Bpod/env-test-pod created
W0812 14:26:09.868] error: min-available and max-unavailable cannot be both specified
I0812 14:26:09.973] core.sh:264: Successful describe pods --namespace=test-kubectl-describe-pod env-test-pod:
I0812 14:26:09.973] Name:         env-test-pod
I0812 14:26:09.973] Namespace:    test-kubectl-describe-pod
I0812 14:26:09.973] Priority:     0
I0812 14:26:09.973] Node:         <none>
I0812 14:26:09.974] Labels:       <none>
... skipping 173 lines ...
I0812 14:26:24.434] (Bpod/valid-pod patched
I0812 14:26:24.539] core.sh:470: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: changed-with-yaml:
I0812 14:26:24.618] (Bpod/valid-pod patched
I0812 14:26:24.716] core.sh:475: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:3.1:
I0812 14:26:24.880] (Bpod/valid-pod patched
I0812 14:26:24.987] core.sh:491: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
I0812 14:26:25.182] (B+++ [0812 14:26:25] "kubectl patch with resourceVersion 499" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again
I0812 14:26:25.438] pod "valid-pod" deleted
I0812 14:26:25.450] pod/valid-pod replaced
I0812 14:26:25.558] core.sh:515: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname
I0812 14:26:25.727] (BSuccessful
I0812 14:26:25.728] message:error: --grace-period must have --force specified
I0812 14:26:25.728] has:\-\-grace-period must have \-\-force specified
I0812 14:26:25.894] Successful
I0812 14:26:25.895] message:error: --timeout must have --force specified
I0812 14:26:25.895] has:\-\-timeout must have \-\-force specified
I0812 14:26:26.055] node/node-v1-test created
W0812 14:26:26.156] W0812 14:26:26.055937   53136 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="node-v1-test" does not exist
I0812 14:26:26.257] node/node-v1-test replaced
I0812 14:26:26.355] core.sh:552: Successful get node node-v1-test {{.metadata.annotations.a}}: b
I0812 14:26:26.448] (Bnode "node-v1-test" deleted
I0812 14:26:26.558] core.sh:559: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
I0812 14:26:26.872] (Bcore.sh:562: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/serve_hostname:
I0812 14:26:27.977] (Bcore.sh:575: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
... skipping 25 lines ...
I0812 14:26:28.579] (Bcore.sh:593: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
I0812 14:26:28.682] (Bpod/valid-pod labeled
W0812 14:26:28.783] Edit cancelled, no changes made.
W0812 14:26:28.784] Edit cancelled, no changes made.
W0812 14:26:28.784] Edit cancelled, no changes made.
W0812 14:26:28.784] Edit cancelled, no changes made.
W0812 14:26:28.784] error: 'name' already has a value (valid-pod), and --overwrite is false
I0812 14:26:28.885] core.sh:597: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan
I0812 14:26:28.907] (Bcore.sh:601: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:26:28.998] (Bpod "valid-pod" force deleted
W0812 14:26:29.099] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0812 14:26:29.200] core.sh:605: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:26:29.200] (B+++ [0812 14:26:29] Creating namespace namespace-1565619989-28642
... skipping 82 lines ...
I0812 14:26:36.930] +++ Running case: test-cmd.run_kubectl_create_error_tests 
I0812 14:26:36.934] +++ working dir: /go/src/k8s.io/kubernetes
I0812 14:26:36.937] +++ command: run_kubectl_create_error_tests
I0812 14:26:36.955] +++ [0812 14:26:36] Creating namespace namespace-1565619996-4340
I0812 14:26:37.044] namespace/namespace-1565619996-4340 created
I0812 14:26:37.131] Context "test" modified.
I0812 14:26:37.140] +++ [0812 14:26:37] Testing kubectl create with error
W0812 14:26:37.241] Error: must specify one of -f and -k
W0812 14:26:37.242] 
W0812 14:26:37.242] Create a resource from a file or from stdin.
W0812 14:26:37.242] 
W0812 14:26:37.242]  JSON and YAML formats are accepted.
W0812 14:26:37.242] 
W0812 14:26:37.242] Examples:
... skipping 41 lines ...
W0812 14:26:37.248] 
W0812 14:26:37.249] Usage:
W0812 14:26:37.249]   kubectl create -f FILENAME [options]
W0812 14:26:37.249] 
W0812 14:26:37.249] Use "kubectl <command> --help" for more information about a given command.
W0812 14:26:37.249] Use "kubectl options" for a list of global command-line options (applies to all commands).
I0812 14:26:37.418] +++ [0812 14:26:37] "kubectl create with empty string list returns error as expected: error: error validating "hack/testdata/invalid-rc-with-empty-args.yaml": error validating data: ValidationError(ReplicationController.spec.template.spec.containers[0].args): unknown object type "nil" in ReplicationController.spec.template.spec.containers[0].args[0]; if you choose to ignore these errors, turn validation off with --validate=false
W0812 14:26:37.519] kubectl convert is DEPRECATED and will be removed in a future version.
W0812 14:26:37.519] In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
I0812 14:26:37.634] +++ exit code: 0
I0812 14:26:37.685] Recording: run_kubectl_apply_tests
I0812 14:26:37.685] Running command: run_kubectl_apply_tests
I0812 14:26:37.714] 
... skipping 20 lines ...
W0812 14:26:40.294] I0812 14:26:40.293794   49693 client.go:354] scheme "" not registered, fallback to default scheme
W0812 14:26:40.294] I0812 14:26:40.294016   49693 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
W0812 14:26:40.294] I0812 14:26:40.294103   49693 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
W0812 14:26:40.296] I0812 14:26:40.296124   49693 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
W0812 14:26:40.299] I0812 14:26:40.298439   49693 controller.go:606] quota admission added evaluator for: resources.mygroup.example.com
I0812 14:26:40.399] kind.mygroup.example.com/myobj serverside-applied (server dry run)
W0812 14:26:40.501] Error from server (NotFound): resources.mygroup.example.com "myobj" not found
I0812 14:26:40.602] customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted
I0812 14:26:40.603] +++ exit code: 0
I0812 14:26:40.643] Recording: run_kubectl_run_tests
I0812 14:26:40.644] Running command: run_kubectl_run_tests
I0812 14:26:40.671] 
I0812 14:26:40.676] +++ Running case: test-cmd.run_kubectl_run_tests 
... skipping 96 lines ...
I0812 14:26:43.897] Context "test" modified.
I0812 14:26:43.905] +++ [0812 14:26:43] Testing kubectl create filter
I0812 14:26:44.012] create.sh:30: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:26:44.238] (Bpod/selector-test-pod created
I0812 14:26:44.362] create.sh:34: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
I0812 14:26:44.467] (BSuccessful
I0812 14:26:44.467] message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
I0812 14:26:44.468] has:pods "selector-test-pod-dont-apply" not found
I0812 14:26:44.559] pod "selector-test-pod" deleted
I0812 14:26:44.581] +++ exit code: 0
I0812 14:26:44.622] Recording: run_kubectl_apply_deployments_tests
I0812 14:26:44.623] Running command: run_kubectl_apply_deployments_tests
I0812 14:26:44.648] 
... skipping 29 lines ...
W0812 14:26:47.258] I0812 14:26:47.162502   53136 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565620004-20512", Name:"nginx", UID:"c06d0274-93de-46c7-b5f1-599f4b7e5c78", APIVersion:"apps/v1", ResourceVersion:"582", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-7dbc4d9f to 3
W0812 14:26:47.259] I0812 14:26:47.167064   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620004-20512", Name:"nginx-7dbc4d9f", UID:"b7f35727-21aa-472a-b7ed-fc481ac5756f", APIVersion:"apps/v1", ResourceVersion:"583", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-7dbc4d9f-tb99k
W0812 14:26:47.260] I0812 14:26:47.171843   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620004-20512", Name:"nginx-7dbc4d9f", UID:"b7f35727-21aa-472a-b7ed-fc481ac5756f", APIVersion:"apps/v1", ResourceVersion:"583", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-7dbc4d9f-x68n7
W0812 14:26:47.260] I0812 14:26:47.172311   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620004-20512", Name:"nginx-7dbc4d9f", UID:"b7f35727-21aa-472a-b7ed-fc481ac5756f", APIVersion:"apps/v1", ResourceVersion:"583", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-7dbc4d9f-hwdsf
I0812 14:26:47.361] apps.sh:148: Successful get deployment nginx {{.metadata.name}}: nginx
I0812 14:26:51.529] (BSuccessful
I0812 14:26:51.529] message:Error from server (Conflict): error when applying patch:
I0812 14:26:51.530] {"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1565620004-20512\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}}
I0812 14:26:51.530] to:
I0812 14:26:51.530] Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment"
I0812 14:26:51.530] Name: "nginx", Namespace: "namespace-1565620004-20512"
I0812 14:26:51.533] Object: &{map["apiVersion":"apps/v1" "kind":"Deployment" "metadata":map["annotations":map["deployment.kubernetes.io/revision":"1" "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1565620004-20512\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx1\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx1\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"] "creationTimestamp":"2019-08-12T14:26:47Z" "generation":'\x01' "labels":map["name":"nginx"] "managedFields":[map["apiVersion":"apps/v1" "fields":map["f:metadata":map["f:annotations":map["f:deployment.kubernetes.io/revision":map[]]] "f:status":map["f:conditions":map[".":map[] "k:{\"type\":\"Available\"}":map[".":map[] "f:lastTransitionTime":map[] "f:lastUpdateTime":map[] "f:message":map[] "f:reason":map[] "f:status":map[] "f:type":map[]] "k:{\"type\":\"Progressing\"}":map[".":map[] "f:lastTransitionTime":map[] "f:lastUpdateTime":map[] "f:message":map[] "f:reason":map[] "f:status":map[] "f:type":map[]]] "f:observedGeneration":map[] "f:replicas":map[] "f:unavailableReplicas":map[] "f:updatedReplicas":map[]]] "manager":"kube-controller-manager" "operation":"Update" "time":"2019-08-12T14:26:47Z"] map["apiVersion":"apps/v1" "fields":map["f:metadata":map["f:annotations":map[".":map[] "f:kubectl.kubernetes.io/last-applied-configuration":map[]] "f:labels":map[".":map[] "f:name":map[]]] "f:spec":map["f:progressDeadlineSeconds":map[] "f:replicas":map[] "f:revisionHistoryLimit":map[] "f:selector":map["f:matchLabels":map[".":map[] "f:name":map[]]] "f:strategy":map["f:rollingUpdate":map[".":map[] "f:maxSurge":map[] "f:maxUnavailable":map[]] "f:type":map[]] "f:template":map["f:metadata":map["f:labels":map[".":map[] "f:name":map[]]] "f:spec":map["f:containers":map["k:{\"name\":\"nginx\"}":map[".":map[] "f:image":map[] "f:imagePullPolicy":map[] "f:name":map[] "f:ports":map[".":map[] "k:{\"containerPort\":80,\"protocol\":\"TCP\"}":map[".":map[] "f:containerPort":map[] "f:protocol":map[]]] "f:resources":map[] "f:terminationMessagePath":map[] "f:terminationMessagePolicy":map[]]] "f:dnsPolicy":map[] "f:restartPolicy":map[] "f:schedulerName":map[] "f:securityContext":map[] "f:terminationGracePeriodSeconds":map[]]]]] "manager":"kubectl" "operation":"Update" "time":"2019-08-12T14:26:47Z"]] "name":"nginx" "namespace":"namespace-1565620004-20512" "resourceVersion":"595" "selfLink":"/apis/apps/v1/namespaces/namespace-1565620004-20512/deployments/nginx" "uid":"c06d0274-93de-46c7-b5f1-599f4b7e5c78"] "spec":map["progressDeadlineSeconds":'\u0258' "replicas":'\x03' "revisionHistoryLimit":'\n' "selector":map["matchLabels":map["name":"nginx1"]] "strategy":map["rollingUpdate":map["maxSurge":"25%" "maxUnavailable":"25%"] "type":"RollingUpdate"] "template":map["metadata":map["creationTimestamp":<nil> "labels":map["name":"nginx1"]] "spec":map["containers":[map["image":"k8s.gcr.io/nginx:test-cmd" "imagePullPolicy":"IfNotPresent" "name":"nginx" "ports":[map["containerPort":'P' "protocol":"TCP"]] "resources":map[] "terminationMessagePath":"/dev/termination-log" "terminationMessagePolicy":"File"]] "dnsPolicy":"ClusterFirst" "restartPolicy":"Always" "schedulerName":"default-scheduler" "securityContext":map[] "terminationGracePeriodSeconds":'\x1e']]] 
"status":map["conditions":[map["lastTransitionTime":"2019-08-12T14:26:47Z" "lastUpdateTime":"2019-08-12T14:26:47Z" "message":"Deployment does not have minimum availability." "reason":"MinimumReplicasUnavailable" "status":"False" "type":"Available"] map["lastTransitionTime":"2019-08-12T14:26:47Z" "lastUpdateTime":"2019-08-12T14:26:47Z" "message":"ReplicaSet \"nginx-7dbc4d9f\" is progressing." "reason":"ReplicaSetUpdated" "status":"True" "type":"Progressing"]] "observedGeneration":'\x01' "replicas":'\x03' "unavailableReplicas":'\x03' "updatedReplicas":'\x03']]}
I0812 14:26:51.534] for: "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again
I0812 14:26:51.534] has:Error from server (Conflict)
W0812 14:26:51.634] I0812 14:26:51.216211   53136 horizontal.go:341] Horizontal Pod Autoscaler frontend has been deleted in namespace-1565619994-8737
I0812 14:26:56.807] deployment.apps/nginx configured
W0812 14:26:56.909] I0812 14:26:56.811838   53136 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565620004-20512", Name:"nginx", UID:"c5fb4480-9628-4ffc-a09d-740f61d7022e", APIVersion:"apps/v1", ResourceVersion:"619", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-594f77b9f6 to 3
W0812 14:26:56.909] I0812 14:26:56.818819   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620004-20512", Name:"nginx-594f77b9f6", UID:"945875a8-2c4b-49d6-8848-edcb29e5f2bc", APIVersion:"apps/v1", ResourceVersion:"620", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-6jssm
W0812 14:26:56.910] I0812 14:26:56.823624   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620004-20512", Name:"nginx-594f77b9f6", UID:"945875a8-2c4b-49d6-8848-edcb29e5f2bc", APIVersion:"apps/v1", ResourceVersion:"620", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-twl9c
W0812 14:26:56.910] I0812 14:26:56.824299   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620004-20512", Name:"nginx-594f77b9f6", UID:"945875a8-2c4b-49d6-8848-edcb29e5f2bc", APIVersion:"apps/v1", ResourceVersion:"620", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-czm2m
I0812 14:26:57.010] Successful
I0812 14:26:57.011] message:        "name": "nginx2"
I0812 14:26:57.011]           "name": "nginx2"
I0812 14:26:57.011] has:"name": "nginx2"
W0812 14:27:01.191] E0812 14:27:01.190302   53136 replica_set.go:450] Sync "namespace-1565620004-20512/nginx-594f77b9f6" failed with Operation cannot be fulfilled on replicasets.apps "nginx-594f77b9f6": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1565620004-20512/nginx-594f77b9f6, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 945875a8-2c4b-49d6-8848-edcb29e5f2bc, UID in object meta: 
W0812 14:27:02.179] I0812 14:27:02.178729   53136 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565620004-20512", Name:"nginx", UID:"f60e264f-cb3a-4fc0-a8c1-cf856edc4505", APIVersion:"apps/v1", ResourceVersion:"650", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-594f77b9f6 to 3
W0812 14:27:02.185] I0812 14:27:02.184899   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620004-20512", Name:"nginx-594f77b9f6", UID:"7f4fe6f0-5d3c-41a4-890a-807952ef55b2", APIVersion:"apps/v1", ResourceVersion:"651", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-jhcgj
W0812 14:27:02.190] I0812 14:27:02.189340   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620004-20512", Name:"nginx-594f77b9f6", UID:"7f4fe6f0-5d3c-41a4-890a-807952ef55b2", APIVersion:"apps/v1", ResourceVersion:"651", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-2fwbw
W0812 14:27:02.193] I0812 14:27:02.192133   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620004-20512", Name:"nginx-594f77b9f6", UID:"7f4fe6f0-5d3c-41a4-890a-807952ef55b2", APIVersion:"apps/v1", ResourceVersion:"651", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-594f77b9f6-tj8l5
I0812 14:27:02.294] Successful
I0812 14:27:02.294] message:The Deployment "nginx" is invalid: spec.template.metadata.labels: Invalid value: map[string]string{"name":"nginx3"}: `selector` does not match template `labels`
... skipping 159 lines ...
I0812 14:27:04.356] +++ [0812 14:27:04] Creating namespace namespace-1565620024-197
I0812 14:27:04.433] namespace/namespace-1565620024-197 created
I0812 14:27:04.509] Context "test" modified.
I0812 14:27:04.519] +++ [0812 14:27:04] Testing kubectl get
I0812 14:27:04.614] get.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:27:04.708] (BSuccessful
I0812 14:27:04.709] message:Error from server (NotFound): pods "abc" not found
I0812 14:27:04.709] has:pods "abc" not found
I0812 14:27:04.807] get.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:27:04.900] (BSuccessful
I0812 14:27:04.900] message:Error from server (NotFound): pods "abc" not found
I0812 14:27:04.900] has:pods "abc" not found
I0812 14:27:04.997] get.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:27:05.090] (BSuccessful
I0812 14:27:05.090] message:{
I0812 14:27:05.090]     "apiVersion": "v1",
I0812 14:27:05.091]     "items": [],
... skipping 23 lines ...
I0812 14:27:05.445] has not:No resources found
I0812 14:27:05.528] Successful
I0812 14:27:05.528] message:NAME
I0812 14:27:05.528] has not:No resources found
I0812 14:27:05.617] get.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:27:05.718] (BSuccessful
I0812 14:27:05.718] message:error: the server doesn't have a resource type "foobar"
I0812 14:27:05.718] has not:No resources found
I0812 14:27:05.809] Successful
I0812 14:27:05.810] message:No resources found in namespace-1565620024-197 namespace.
I0812 14:27:05.810] has:No resources found
I0812 14:27:05.901] Successful
I0812 14:27:05.902] message:
I0812 14:27:05.902] has not:No resources found
I0812 14:27:05.994] Successful
I0812 14:27:05.994] message:No resources found in namespace-1565620024-197 namespace.
I0812 14:27:05.994] has:No resources found
I0812 14:27:06.095] get.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:27:06.191] (BSuccessful
I0812 14:27:06.191] message:Error from server (NotFound): pods "abc" not found
I0812 14:27:06.192] has:pods "abc" not found
I0812 14:27:06.195] FAIL!
I0812 14:27:06.196] message:Error from server (NotFound): pods "abc" not found
I0812 14:27:06.196] has not:List
I0812 14:27:06.196] 99 /go/src/k8s.io/kubernetes/test/cmd/../../test/cmd/get.sh
I0812 14:27:06.321] Successful
I0812 14:27:06.322] message:I0812 14:27:06.269318   63715 loader.go:375] Config loaded from file:  /tmp/tmp.QAZctyhJp0/.kube/config
I0812 14:27:06.322] I0812 14:27:06.271816   63715 round_trippers.go:438] GET http://127.0.0.1:8080/version?timeout=32s 200 OK in 1 milliseconds
I0812 14:27:06.323] I0812 14:27:06.294093   63715 round_trippers.go:438] GET http://127.0.0.1:8080/api/v1/namespaces/default/pods 200 OK in 2 milliseconds
... skipping 660 lines ...
I0812 14:27:11.993] Successful
I0812 14:27:11.993] message:NAME    DATA   AGE
I0812 14:27:11.993] one     0      0s
I0812 14:27:11.993] three   0      0s
I0812 14:27:11.994] two     0      0s
I0812 14:27:11.994] STATUS    REASON          MESSAGE
I0812 14:27:11.994] Failure   InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0812 14:27:11.994] has not:watch is only supported on individual resources
I0812 14:27:13.091] Successful
I0812 14:27:13.092] message:STATUS    REASON          MESSAGE
I0812 14:27:13.092] Failure   InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0812 14:27:13.093] has not:watch is only supported on individual resources
I0812 14:27:13.099] +++ [0812 14:27:13] Creating namespace namespace-1565620033-15975
I0812 14:27:13.177] namespace/namespace-1565620033-15975 created
I0812 14:27:13.256] Context "test" modified.
I0812 14:27:13.360] get.sh:157: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:27:13.535] (Bpod/valid-pod created
... skipping 104 lines ...
I0812 14:27:13.654] }
I0812 14:27:13.735] get.sh:162: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:27:14.001] (B<no value>Successful
I0812 14:27:14.001] message:valid-pod:
I0812 14:27:14.002] has:valid-pod:
I0812 14:27:14.090] Successful
I0812 14:27:14.091] message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. Printing more information for debugging the template:
I0812 14:27:14.091] 	template was:
I0812 14:27:14.092] 		{.missing}
I0812 14:27:14.092] 	object given to jsonpath engine was:
I0812 14:27:14.094] 		map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2019-08-12T14:27:13Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fields":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:priority":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl", "operation":"Update", "time":"2019-08-12T14:27:13Z"}}, "name":"valid-pod", "namespace":"namespace-1565620033-15975", "resourceVersion":"694", "selfLink":"/api/v1/namespaces/namespace-1565620033-15975/pods/valid-pod", "uid":"259d37d6-9292-4d47-80da-0358273a57ad"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"k8s.gcr.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}}
I0812 14:27:14.094] has:missing is not found
W0812 14:27:14.195] error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing"
I0812 14:27:14.296] Successful
I0812 14:27:14.297] message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing". Printing more information for debugging the template:
I0812 14:27:14.297] 	template was:
I0812 14:27:14.297] 		{{.missing}}
I0812 14:27:14.297] 	raw data was:
I0812 14:27:14.299] 		{"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2019-08-12T14:27:13Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fields":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:priority":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl","operation":"Update","time":"2019-08-12T14:27:13Z"}],"name":"valid-pod","namespace":"namespace-1565620033-15975","resourceVersion":"694","selfLink":"/api/v1/namespaces/namespace-1565620033-15975/pods/valid-pod","uid":"259d37d6-9292-4d47-80da-0358273a57ad"},"spec":{"containers":[{"image":"k8s.gcr.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}}
I0812 14:27:14.299] 	object given to template engine was:
I0812 14:27:14.300] 		map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2019-08-12T14:27:13Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fields:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:priority:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl operation:Update time:2019-08-12T14:27:13Z]] name:valid-pod namespace:namespace-1565620033-15975 resourceVersion:694 selfLink:/api/v1/namespaces/namespace-1565620033-15975/pods/valid-pod uid:259d37d6-9292-4d47-80da-0358273a57ad] spec:map[containers:[map[image:k8s.gcr.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]]
I0812 14:27:14.300] has:map has no entry for key "missing"
I0812 14:27:15.293] Successful
I0812 14:27:15.294] message:NAME        READY   STATUS    RESTARTS   AGE
I0812 14:27:15.294] valid-pod   0/1     Pending   0          1s
I0812 14:27:15.294] STATUS      REASON          MESSAGE
I0812 14:27:15.294] Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0812 14:27:15.294] has:STATUS
I0812 14:27:15.295] Successful
I0812 14:27:15.295] message:NAME        READY   STATUS    RESTARTS   AGE
I0812 14:27:15.295] valid-pod   0/1     Pending   0          1s
I0812 14:27:15.295] STATUS      REASON          MESSAGE
I0812 14:27:15.295] Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0812 14:27:15.295] has:valid-pod
I0812 14:27:16.393] Successful
I0812 14:27:16.394] message:pod/valid-pod
I0812 14:27:16.394] has not:STATUS
I0812 14:27:16.395] Successful
I0812 14:27:16.395] message:pod/valid-pod
... skipping 144 lines ...
I0812 14:27:17.503] status:
I0812 14:27:17.503]   phase: Pending
I0812 14:27:17.504]   qosClass: Guaranteed
I0812 14:27:17.504] ---
I0812 14:27:17.504] has:name: valid-pod
I0812 14:27:17.585] Successful
I0812 14:27:17.585] message:Error from server (NotFound): pods "invalid-pod" not found
I0812 14:27:17.585] has:"invalid-pod" not found
I0812 14:27:17.668] pod "valid-pod" deleted
I0812 14:27:17.767] get.sh:200: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:27:17.913] (Bpod/redis-master created
I0812 14:27:17.918] pod/valid-pod created
I0812 14:27:18.013] Successful
... skipping 35 lines ...
I0812 14:27:19.210] +++ command: run_kubectl_exec_pod_tests
I0812 14:27:19.224] +++ [0812 14:27:19] Creating namespace namespace-1565620039-16762
I0812 14:27:19.302] namespace/namespace-1565620039-16762 created
I0812 14:27:19.380] Context "test" modified.
I0812 14:27:19.388] +++ [0812 14:27:19] Testing kubectl exec POD COMMAND
I0812 14:27:19.474] Successful
I0812 14:27:19.474] message:Error from server (NotFound): pods "abc" not found
I0812 14:27:19.474] has:pods "abc" not found
I0812 14:27:19.626] pod/test-pod created
I0812 14:27:19.736] Successful
I0812 14:27:19.737] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0812 14:27:19.737] has not:pods "test-pod" not found
I0812 14:27:19.738] Successful
I0812 14:27:19.739] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0812 14:27:19.739] has not:pod or type/name must be specified
I0812 14:27:19.822] pod "test-pod" deleted
I0812 14:27:19.844] +++ exit code: 0
I0812 14:27:19.885] Recording: run_kubectl_exec_resource_name_tests
I0812 14:27:19.885] Running command: run_kubectl_exec_resource_name_tests
I0812 14:27:19.910] 
... skipping 2 lines ...
I0812 14:27:19.920] +++ command: run_kubectl_exec_resource_name_tests
I0812 14:27:19.937] +++ [0812 14:27:19] Creating namespace namespace-1565620039-24622
I0812 14:27:20.022] namespace/namespace-1565620039-24622 created
I0812 14:27:20.102] Context "test" modified.
I0812 14:27:20.111] +++ [0812 14:27:20] Testing kubectl exec TYPE/NAME COMMAND
I0812 14:27:20.227] Successful
I0812 14:27:20.227] message:error: the server doesn't have a resource type "foo"
I0812 14:27:20.228] has:error:
I0812 14:27:20.317] Successful
I0812 14:27:20.317] message:Error from server (NotFound): deployments.apps "bar" not found
I0812 14:27:20.317] has:"bar" not found
I0812 14:27:20.477] pod/test-pod created
I0812 14:27:20.655] replicaset.apps/frontend created
W0812 14:27:20.756] I0812 14:27:20.661192   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620039-24622", Name:"frontend", UID:"db57aee7-92de-4dba-9989-a16c39f3d97e", APIVersion:"apps/v1", ResourceVersion:"746", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-s8zsc
W0812 14:27:20.757] I0812 14:27:20.665223   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620039-24622", Name:"frontend", UID:"db57aee7-92de-4dba-9989-a16c39f3d97e", APIVersion:"apps/v1", ResourceVersion:"746", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-zdxl5
W0812 14:27:20.757] I0812 14:27:20.665434   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620039-24622", Name:"frontend", UID:"db57aee7-92de-4dba-9989-a16c39f3d97e", APIVersion:"apps/v1", ResourceVersion:"746", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-82ppj
I0812 14:27:20.857] configmap/test-set-env-config created
I0812 14:27:20.934] Successful
I0812 14:27:20.934] message:error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented
I0812 14:27:20.934] has:not implemented
I0812 14:27:21.035] Successful
I0812 14:27:21.036] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0812 14:27:21.036] has not:not found
I0812 14:27:21.036] Successful
I0812 14:27:21.037] message:Error from server (BadRequest): pod test-pod does not have a host assigned
I0812 14:27:21.037] has not:pod or type/name must be specified
I0812 14:27:21.154] Successful
I0812 14:27:21.155] message:Error from server (BadRequest): pod frontend-82ppj does not have a host assigned
I0812 14:27:21.155] has not:not found
I0812 14:27:21.157] Successful
I0812 14:27:21.158] message:Error from server (BadRequest): pod frontend-82ppj does not have a host assigned
I0812 14:27:21.158] has not:pod or type/name must be specified
I0812 14:27:21.254] pod "test-pod" deleted
I0812 14:27:21.347] replicaset.apps "frontend" deleted
I0812 14:27:21.441] configmap "test-set-env-config" deleted
I0812 14:27:21.464] +++ exit code: 0
I0812 14:27:21.508] Recording: run_create_secret_tests
I0812 14:27:21.509] Running command: run_create_secret_tests
I0812 14:27:21.538] 
I0812 14:27:21.540] +++ Running case: test-cmd.run_create_secret_tests 
I0812 14:27:21.544] +++ working dir: /go/src/k8s.io/kubernetes
I0812 14:27:21.547] +++ command: run_create_secret_tests
I0812 14:27:21.651] Successful
I0812 14:27:21.651] message:Error from server (NotFound): secrets "mysecret" not found
I0812 14:27:21.651] has:secrets "mysecret" not found
I0812 14:27:21.824] Successful
I0812 14:27:21.824] message:Error from server (NotFound): secrets "mysecret" not found
I0812 14:27:21.824] has:secrets "mysecret" not found
I0812 14:27:21.826] Successful
I0812 14:27:21.827] message:user-specified
I0812 14:27:21.827] has:user-specified
I0812 14:27:21.909] Successful
I0812 14:27:21.999] {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","selfLink":"/api/v1/namespaces/default/configmaps/tester-update-cm","uid":"8dc20ca4-71a4-48c6-8de1-6c27ddf88e99","resourceVersion":"767","creationTimestamp":"2019-08-12T14:27:21Z"}}
... skipping 2 lines ...
I0812 14:27:22.181] has:uid
I0812 14:27:22.265] Successful
I0812 14:27:22.266] message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","selfLink":"/api/v1/namespaces/default/configmaps/tester-update-cm","uid":"8dc20ca4-71a4-48c6-8de1-6c27ddf88e99","resourceVersion":"768","creationTimestamp":"2019-08-12T14:27:21Z","managedFields":[{"manager":"kubectl","operation":"Update","apiVersion":"v1","time":"2019-08-12T14:27:22Z","fields":{"f:data":{"f:key1":{},".":{}}}}]},"data":{"key1":"config1"}}
I0812 14:27:22.266] has:config1
I0812 14:27:22.342] {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"8dc20ca4-71a4-48c6-8de1-6c27ddf88e99"}}
I0812 14:27:22.445] Successful
I0812 14:27:22.445] message:Error from server (NotFound): configmaps "tester-update-cm" not found
I0812 14:27:22.445] has:configmaps "tester-update-cm" not found
I0812 14:27:22.461] +++ exit code: 0
I0812 14:27:22.503] Recording: run_kubectl_create_kustomization_directory_tests
I0812 14:27:22.504] Running command: run_kubectl_create_kustomization_directory_tests
I0812 14:27:22.527] 
I0812 14:27:22.529] +++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests 
... skipping 158 lines ...
I0812 14:27:25.397] valid-pod   0/1     Pending   0          0s
I0812 14:27:25.397] has:valid-pod
I0812 14:27:26.492] Successful
I0812 14:27:26.493] message:NAME        READY   STATUS    RESTARTS   AGE
I0812 14:27:26.493] valid-pod   0/1     Pending   0          0s
I0812 14:27:26.493] STATUS      REASON          MESSAGE
I0812 14:27:26.493] Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
I0812 14:27:26.493] has:Timeout exceeded while reading body
I0812 14:27:26.588] Successful
I0812 14:27:26.589] message:NAME        READY   STATUS    RESTARTS   AGE
I0812 14:27:26.590] valid-pod   0/1     Pending   0          1s
I0812 14:27:26.590] has:valid-pod
I0812 14:27:26.678] Successful
I0812 14:27:26.679] message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)
I0812 14:27:26.679] has:Invalid timeout value
I0812 14:27:26.771] pod "valid-pod" deleted
I0812 14:27:26.794] +++ exit code: 0
I0812 14:27:26.831] Recording: run_crd_tests
I0812 14:27:26.831] Running command: run_crd_tests
I0812 14:27:26.858] 
... skipping 245 lines ...
W0812 14:27:32.015] I0812 14:27:29.991218   49693 controller.go:606] quota admission added evaluator for: foos.company.com
I0812 14:27:32.116] crd.sh:236: Successful get foos/test {{.patched}}: value1
I0812 14:27:32.121] (Bfoo.company.com/test patched
I0812 14:27:32.240] crd.sh:238: Successful get foos/test {{.patched}}: value2
I0812 14:27:32.332] (Bfoo.company.com/test patched
I0812 14:27:32.437] crd.sh:240: Successful get foos/test {{.patched}}: <no value>
I0812 14:27:32.617] (B+++ [0812 14:27:32] "kubectl patch --local" returns error as expected for CustomResource: error: cannot apply strategic merge patch for company.com/v1, Kind=Foo locally, try --type merge
I0812 14:27:32.691] {
I0812 14:27:32.692]     "apiVersion": "company.com/v1",
I0812 14:27:32.692]     "kind": "Foo",
I0812 14:27:32.692]     "metadata": {
I0812 14:27:32.692]         "annotations": {
I0812 14:27:32.692]             "kubernetes.io/change-cause": "kubectl patch foos/test --server=http://127.0.0.1:8080 --match-server-version=true --patch={\"patched\":null} --type=merge --record=true"
... skipping 350 lines ...
I0812 14:27:52.027] (Bnamespace/non-native-resources created
I0812 14:27:52.198] bar.company.com/test created
I0812 14:27:52.309] crd.sh:455: Successful get bars {{len .items}}: 1
I0812 14:27:52.392] (Bnamespace "non-native-resources" deleted
I0812 14:27:57.623] crd.sh:458: Successful get bars {{len .items}}: 0
I0812 14:27:57.798] (Bcustomresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted
W0812 14:27:57.899] Error from server (NotFound): namespaces "non-native-resources" not found
I0812 14:27:57.999] customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted
I0812 14:27:58.025] customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted
I0812 14:27:58.132] customresourcedefinition.apiextensions.k8s.io "validfoos.company.com" deleted
I0812 14:27:58.165] +++ exit code: 0
I0812 14:27:58.205] Recording: run_cmd_with_img_tests
I0812 14:27:58.206] Running command: run_cmd_with_img_tests
... skipping 10 lines ...
W0812 14:27:58.527] I0812 14:27:58.525136   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620078-11758", Name:"test1-9797f89d8", UID:"bfbf4e50-4556-477f-b202-e61a457d0374", APIVersion:"apps/v1", ResourceVersion:"921", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test1-9797f89d8-64dsj
I0812 14:27:58.628] Successful
I0812 14:27:58.628] message:deployment.apps/test1 created
I0812 14:27:58.628] has:deployment.apps/test1 created
I0812 14:27:58.629] deployment.apps "test1" deleted
I0812 14:27:58.702] Successful
I0812 14:27:58.703] message:error: Invalid image name "InvalidImageName": invalid reference format
I0812 14:27:58.703] has:error: Invalid image name "InvalidImageName": invalid reference format
I0812 14:27:58.718] +++ exit code: 0
I0812 14:27:58.763] +++ [0812 14:27:58] Testing recursive resources
I0812 14:27:58.769] +++ [0812 14:27:58] Creating namespace namespace-1565620078-28763
I0812 14:27:58.852] namespace/namespace-1565620078-28763 created
I0812 14:27:58.928] Context "test" modified.
I0812 14:27:59.028] generic-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:27:59.362] (Bgeneric-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:27:59.364] (BSuccessful
I0812 14:27:59.365] message:pod/busybox0 created
I0812 14:27:59.365] pod/busybox1 created
I0812 14:27:59.365] error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0812 14:27:59.365] has:error validating data: kind not set
I0812 14:27:59.462] generic-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:27:59.665] (Bgeneric-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox:
I0812 14:27:59.668] (BSuccessful
I0812 14:27:59.668] message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:27:59.669] has:Object 'Kind' is missing
I0812 14:27:59.767] generic-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:00.078] (Bgeneric-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
I0812 14:28:00.081] (BSuccessful
I0812 14:28:00.082] message:pod/busybox0 replaced
I0812 14:28:00.082] pod/busybox1 replaced
I0812 14:28:00.082] error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0812 14:28:00.082] has:error validating data: kind not set
I0812 14:28:00.182] generic-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:00.295] (BSuccessful
I0812 14:28:00.295] message:Name:         busybox0
I0812 14:28:00.296] Namespace:    namespace-1565620078-28763
I0812 14:28:00.296] Priority:     0
I0812 14:28:00.296] Node:         <none>
... skipping 159 lines ...
I0812 14:28:00.328] has:Object 'Kind' is missing
I0812 14:28:00.413] generic-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:00.619] (Bgeneric-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue:
I0812 14:28:00.621] (BSuccessful
I0812 14:28:00.621] message:pod/busybox0 annotated
I0812 14:28:00.621] pod/busybox1 annotated
I0812 14:28:00.622] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:28:00.622] has:Object 'Kind' is missing
I0812 14:28:00.722] generic-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:01.059] (Bgeneric-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
I0812 14:28:01.061] (BSuccessful
I0812 14:28:01.062] message:Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
I0812 14:28:01.062] pod/busybox0 configured
I0812 14:28:01.062] Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
I0812 14:28:01.062] pod/busybox1 configured
I0812 14:28:01.063] error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0812 14:28:01.063] has:error validating data: kind not set
I0812 14:28:01.163] generic-resources.sh:265: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:01.350] (Bdeployment.apps/nginx created
W0812 14:28:01.450] W0812 14:27:58.811781   49693 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0812 14:28:01.451] E0812 14:27:58.813759   53136 reflector.go:282] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.451] W0812 14:27:58.919696   49693 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0812 14:28:01.452] E0812 14:27:58.921980   53136 reflector.go:282] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.452] W0812 14:27:59.037137   49693 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0812 14:28:01.452] E0812 14:27:59.038828   53136 reflector.go:282] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.453] W0812 14:27:59.144625   49693 cacher.go:154] Terminating all watchers from cacher *unstructured.Unstructured
W0812 14:28:01.453] E0812 14:27:59.146201   53136 reflector.go:282] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.453] E0812 14:27:59.815140   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.454] E0812 14:27:59.923397   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.454] E0812 14:28:00.040208   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.454] E0812 14:28:00.147868   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.455] E0812 14:28:00.816688   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.455] E0812 14:28:00.925211   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.455] E0812 14:28:01.041794   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.455] E0812 14:28:01.149576   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.456] I0812 14:28:01.357075   53136 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565620078-28763", Name:"nginx", UID:"d12b77db-a4e1-4dbb-aaee-5caad9af6700", APIVersion:"apps/v1", ResourceVersion:"946", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-bbbbb95b5 to 3
W0812 14:28:01.456] I0812 14:28:01.361879   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620078-28763", Name:"nginx-bbbbb95b5", UID:"39580064-c190-4156-9307-ade08923c1b5", APIVersion:"apps/v1", ResourceVersion:"947", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-bbbbb95b5-lph6z
W0812 14:28:01.457] I0812 14:28:01.365452   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620078-28763", Name:"nginx-bbbbb95b5", UID:"39580064-c190-4156-9307-ade08923c1b5", APIVersion:"apps/v1", ResourceVersion:"947", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-bbbbb95b5-6k25p
W0812 14:28:01.457] I0812 14:28:01.367325   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620078-28763", Name:"nginx-bbbbb95b5", UID:"39580064-c190-4156-9307-ade08923c1b5", APIVersion:"apps/v1", ResourceVersion:"947", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-bbbbb95b5-8xshq
I0812 14:28:01.557] generic-resources.sh:269: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx:
I0812 14:28:01.580] (Bgeneric-resources.sh:270: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
... skipping 41 lines ...
I0812 14:28:01.781]       terminationGracePeriodSeconds: 30
I0812 14:28:01.781] status: {}
I0812 14:28:01.781] has:extensions/v1beta1
I0812 14:28:01.865] deployment.apps "nginx" deleted
W0812 14:28:01.966] kubectl convert is DEPRECATED and will be removed in a future version.
W0812 14:28:01.967] In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
W0812 14:28:01.967] E0812 14:28:01.818867   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:01.967] E0812 14:28:01.926759   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:02.044] E0812 14:28:02.043579   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:02.145] generic-resources.sh:281: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:02.174] (Bgeneric-resources.sh:285: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:02.177] (BSuccessful
I0812 14:28:02.177] message:kubectl convert is DEPRECATED and will be removed in a future version.
I0812 14:28:02.178] In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
I0812 14:28:02.178] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:28:02.178] has:Object 'Kind' is missing
I0812 14:28:02.283] generic-resources.sh:290: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:02.382] (BSuccessful
I0812 14:28:02.383] message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:28:02.383] has:busybox0:busybox1:
I0812 14:28:02.383] Successful
I0812 14:28:02.384] message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:28:02.384] has:Object 'Kind' is missing
I0812 14:28:02.489] generic-resources.sh:299: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:02.591] (Bpod/busybox0 labeled
I0812 14:28:02.591] pod/busybox1 labeled
I0812 14:28:02.591] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:28:02.688] generic-resources.sh:304: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue:
I0812 14:28:02.690] (BSuccessful
I0812 14:28:02.690] message:pod/busybox0 labeled
I0812 14:28:02.691] pod/busybox1 labeled
I0812 14:28:02.691] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:28:02.691] has:Object 'Kind' is missing
I0812 14:28:02.788] generic-resources.sh:309: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:02.909] (Bpod/busybox0 patched
I0812 14:28:02.910] pod/busybox1 patched
I0812 14:28:02.910] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:28:03.013] generic-resources.sh:314: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox:
I0812 14:28:03.016] (BSuccessful
I0812 14:28:03.016] message:pod/busybox0 patched
I0812 14:28:03.016] pod/busybox1 patched
I0812 14:28:03.016] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:28:03.016] has:Object 'Kind' is missing
I0812 14:28:03.120] generic-resources.sh:319: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:03.332] (Bgeneric-resources.sh:323: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:03.335] (BSuccessful
I0812 14:28:03.336] message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0812 14:28:03.336] pod "busybox0" force deleted
I0812 14:28:03.336] pod "busybox1" force deleted
I0812 14:28:03.337] error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I0812 14:28:03.337] has:Object 'Kind' is missing
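Context for the repeated decode errors above: each recursive kubectl command in this block fails on the same file because hack/testdata/recursive/pod/pod/busybox-broken.yaml deliberately misspells the kind field, so the decoder reports "Object 'Kind' is missing". A minimal sketch of that manifest, reconstructed from the JSON quoted in the error output (the YAML layout is an assumption; only the field values come from the log):

    apiVersion: v1
    ind: Pod                      # intentional typo for "kind"; this is what triggers the decode error
    metadata:
      labels:
        app: busybox2
      name: busybox2
    spec:
      containers:
      - command:
        - sleep
        - "3600"
        image: busybox
        imagePullPolicy: IfNotPresent
        name: busybox
      restartPolicy: Always

The well-formed sibling manifests (busybox0, busybox1) in the same directory decode normally, which is why the label, patch, and force-delete operations above still succeed for them while the broken file keeps producing the same error.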
I0812 14:28:03.437] generic-resources.sh:328: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:03.600] replicationcontroller/busybox0 created
I0812 14:28:03.605] replicationcontroller/busybox1 created
W0812 14:28:03.707] E0812 14:28:02.151665   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:03.707] E0812 14:28:02.820517   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:03.708] E0812 14:28:02.928858   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:03.708] E0812 14:28:03.044886   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:03.708] E0812 14:28:03.153151   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:03.709] error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
W0812 14:28:03.709] I0812 14:28:03.605523   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565620078-28763", Name:"busybox0", UID:"43a172df-bf7e-4a01-b9d8-5ac5441de9f6", APIVersion:"v1", ResourceVersion:"978", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-mjg5g
W0812 14:28:03.709] I0812 14:28:03.609692   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565620078-28763", Name:"busybox1", UID:"dcb522cb-cc12-4c21-bed0-7ee007ce870c", APIVersion:"v1", ResourceVersion:"980", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-vg7q9
I0812 14:28:03.810] generic-resources.sh:332: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:03.828] generic-resources.sh:337: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:03.932] generic-resources.sh:338: Successful get rc busybox0 {{.spec.replicas}}: 1
I0812 14:28:04.035] generic-resources.sh:339: Successful get rc busybox1 {{.spec.replicas}}: 1
I0812 14:28:04.242] generic-resources.sh:344: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80
I0812 14:28:04.344] generic-resources.sh:345: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80
I0812 14:28:04.347] Successful
I0812 14:28:04.347] message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled
I0812 14:28:04.347] horizontalpodautoscaler.autoscaling/busybox1 autoscaled
I0812 14:28:04.348] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:04.348] has:Object 'Kind' is missing
I0812 14:28:04.436] horizontalpodautoscaler.autoscaling "busybox0" deleted
I0812 14:28:04.525] horizontalpodautoscaler.autoscaling "busybox1" deleted
W0812 14:28:04.626] E0812 14:28:03.822640   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:04.627] E0812 14:28:03.931375   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:04.627] E0812 14:28:04.047162   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:04.628] E0812 14:28:04.154886   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:04.728] generic-resources.sh:353: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:04.742] generic-resources.sh:354: Successful get rc busybox0 {{.spec.replicas}}: 1
I0812 14:28:04.842] generic-resources.sh:355: Successful get rc busybox1 {{.spec.replicas}}: 1
I0812 14:28:05.044] generic-resources.sh:359: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
I0812 14:28:05.155] generic-resources.sh:360: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
I0812 14:28:05.158] Successful
I0812 14:28:05.158] message:service/busybox0 exposed
I0812 14:28:05.158] service/busybox1 exposed
I0812 14:28:05.159] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:05.159] has:Object 'Kind' is missing
I0812 14:28:05.256] generic-resources.sh:366: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:05.352] generic-resources.sh:367: Successful get rc busybox0 {{.spec.replicas}}: 1
I0812 14:28:05.448] generic-resources.sh:368: Successful get rc busybox1 {{.spec.replicas}}: 1
I0812 14:28:05.669] generic-resources.sh:372: Successful get rc busybox0 {{.spec.replicas}}: 2
I0812 14:28:05.773] generic-resources.sh:373: Successful get rc busybox1 {{.spec.replicas}}: 2
I0812 14:28:05.775] Successful
I0812 14:28:05.775] message:replicationcontroller/busybox0 scaled
I0812 14:28:05.775] replicationcontroller/busybox1 scaled
I0812 14:28:05.776] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:05.776] has:Object 'Kind' is missing
I0812 14:28:05.876] generic-resources.sh:378: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:06.079] generic-resources.sh:382: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:06.081] Successful
I0812 14:28:06.082] message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0812 14:28:06.082] replicationcontroller "busybox0" force deleted
I0812 14:28:06.082] replicationcontroller "busybox1" force deleted
I0812 14:28:06.083] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:06.083] has:Object 'Kind' is missing
I0812 14:28:06.179] generic-resources.sh:387: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:06.345] deployment.apps/nginx1-deployment created
I0812 14:28:06.350] deployment.apps/nginx0-deployment created
W0812 14:28:06.451] E0812 14:28:04.824427   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:06.452] E0812 14:28:04.933380   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:06.452] E0812 14:28:05.048599   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:06.452] E0812 14:28:05.156375   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:06.453] I0812 14:28:05.556949   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565620078-28763", Name:"busybox0", UID:"43a172df-bf7e-4a01-b9d8-5ac5441de9f6", APIVersion:"v1", ResourceVersion:"999", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-lslh5
W0812 14:28:06.453] I0812 14:28:05.568937   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565620078-28763", Name:"busybox1", UID:"dcb522cb-cc12-4c21-bed0-7ee007ce870c", APIVersion:"v1", ResourceVersion:"1004", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-26jmk
W0812 14:28:06.453] E0812 14:28:05.826012   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:06.453] E0812 14:28:05.935123   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:06.453] E0812 14:28:06.050205   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:06.454] E0812 14:28:06.158060   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:06.454] error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
W0812 14:28:06.454] I0812 14:28:06.350733   53136 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565620078-28763", Name:"nginx1-deployment", UID:"cced9cf6-533f-499b-844f-33b0e751db6f", APIVersion:"apps/v1", ResourceVersion:"1019", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx1-deployment-84f7f49fb7 to 2
W0812 14:28:06.454] I0812 14:28:06.357094   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620078-28763", Name:"nginx1-deployment-84f7f49fb7", UID:"430b811a-14dc-478e-a7c8-f10f95d50373", APIVersion:"apps/v1", ResourceVersion:"1021", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx1-deployment-84f7f49fb7-p4jbg
W0812 14:28:06.455] I0812 14:28:06.358106   53136 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1565620078-28763", Name:"nginx0-deployment", UID:"0afe1f12-7e41-4a40-8ada-6d0abda1b685", APIVersion:"apps/v1", ResourceVersion:"1020", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx0-deployment-57475bf54d to 2
W0812 14:28:06.455] I0812 14:28:06.364180   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620078-28763", Name:"nginx0-deployment-57475bf54d", UID:"1d91e164-82b4-4193-933d-6da34505e9ea", APIVersion:"apps/v1", ResourceVersion:"1024", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx0-deployment-57475bf54d-nhdkj
W0812 14:28:06.455] I0812 14:28:06.367950   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620078-28763", Name:"nginx0-deployment-57475bf54d", UID:"1d91e164-82b4-4193-933d-6da34505e9ea", APIVersion:"apps/v1", ResourceVersion:"1024", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx0-deployment-57475bf54d-8pg42
W0812 14:28:06.456] I0812 14:28:06.368609   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1565620078-28763", Name:"nginx1-deployment-84f7f49fb7", UID:"430b811a-14dc-478e-a7c8-f10f95d50373", APIVersion:"apps/v1", ResourceVersion:"1021", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx1-deployment-84f7f49fb7-wr69b
I0812 14:28:06.556] generic-resources.sh:391: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment:
I0812 14:28:06.585] generic-resources.sh:392: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9:
I0812 14:28:06.812] generic-resources.sh:396: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9:
I0812 14:28:06.814] Successful
I0812 14:28:06.815] message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1)
I0812 14:28:06.815] deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1)
I0812 14:28:06.816] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0812 14:28:06.816] has:Object 'Kind' is missing
W0812 14:28:06.917] E0812 14:28:06.827747   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:06.938] E0812 14:28:06.937510   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:07.039] deployment.apps/nginx1-deployment paused
I0812 14:28:07.039] deployment.apps/nginx0-deployment paused
I0812 14:28:07.051] generic-resources.sh:404: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true:
I0812 14:28:07.054] Successful
I0812 14:28:07.054] message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0812 14:28:07.054] has:Object 'Kind' is missing
W0812 14:28:07.155] E0812 14:28:07.051918   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:07.160] E0812 14:28:07.159861   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:07.261] deployment.apps/nginx1-deployment resumed
I0812 14:28:07.261] deployment.apps/nginx0-deployment resumed
I0812 14:28:07.283] generic-resources.sh:410: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: <no value>:<no value>:
I0812 14:28:07.286] Successful
I0812 14:28:07.287] message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0812 14:28:07.287] has:Object 'Kind' is missing
... skipping 3 lines ...
I0812 14:28:07.405] 1         <none>
I0812 14:28:07.405] 
I0812 14:28:07.405] deployment.apps/nginx0-deployment 
I0812 14:28:07.405] REVISION  CHANGE-CAUSE
I0812 14:28:07.405] 1         <none>
I0812 14:28:07.405] 
I0812 14:28:07.406] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0812 14:28:07.406] has:nginx0-deployment
I0812 14:28:07.407] Successful
I0812 14:28:07.407] message:deployment.apps/nginx1-deployment 
I0812 14:28:07.407] REVISION  CHANGE-CAUSE
I0812 14:28:07.407] 1         <none>
I0812 14:28:07.407] 
I0812 14:28:07.408] deployment.apps/nginx0-deployment 
I0812 14:28:07.408] REVISION  CHANGE-CAUSE
I0812 14:28:07.408] 1         <none>
I0812 14:28:07.408] 
I0812 14:28:07.408] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0812 14:28:07.409] has:nginx1-deployment
I0812 14:28:07.410] Successful
I0812 14:28:07.410] message:deployment.apps/nginx1-deployment 
I0812 14:28:07.410] REVISION  CHANGE-CAUSE
I0812 14:28:07.411] 1         <none>
I0812 14:28:07.411] 
I0812 14:28:07.411] deployment.apps/nginx0-deployment 
I0812 14:28:07.411] REVISION  CHANGE-CAUSE
I0812 14:28:07.411] 1         <none>
I0812 14:28:07.411] 
I0812 14:28:07.411] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
I0812 14:28:07.411] has:Object 'Kind' is missing
I0812 14:28:07.497] deployment.apps "nginx1-deployment" force deleted
I0812 14:28:07.504] deployment.apps "nginx0-deployment" force deleted
W0812 14:28:07.605] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0812 14:28:07.606] error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
W0812 14:28:07.830] E0812 14:28:07.829529   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:07.940] E0812 14:28:07.939322   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:08.054] E0812 14:28:08.053405   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:08.162] E0812 14:28:08.161504   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:08.612] generic-resources.sh:426: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:08.778] replicationcontroller/busybox0 created
I0812 14:28:08.786] replicationcontroller/busybox1 created
W0812 14:28:08.887] I0812 14:28:08.784210   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565620078-28763", Name:"busybox0", UID:"50f18589-7cdd-4095-b08c-f6abb3c84a27", APIVersion:"v1", ResourceVersion:"1068", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-cxzvk
W0812 14:28:08.887] error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
W0812 14:28:08.888] I0812 14:28:08.790329   53136 event.go:255] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1565620078-28763", Name:"busybox1", UID:"6cf511c4-02b2-4b06-8388-30ad60f800d4", APIVersion:"v1", ResourceVersion:"1070", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-g2v75
W0812 14:28:08.888] E0812 14:28:08.831187   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:08.941] E0812 14:28:08.940928   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:09.042] generic-resources.sh:430: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
I0812 14:28:09.043] Successful
I0812 14:28:09.043] message:no rollbacker has been implemented for "ReplicationController"
I0812 14:28:09.043] no rollbacker has been implemented for "ReplicationController"
I0812 14:28:09.043] unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:09.044] has:no rollbacker has been implemented for "ReplicationController"
I0812 14:28:09.044] Successful
I0812 14:28:09.044] message:no rollbacker has been implemented for "ReplicationController"
I0812 14:28:09.044] no rollbacker has been implemented for "ReplicationController"
I0812 14:28:09.044] unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:09.044] has:Object 'Kind' is missing
I0812 14:28:09.113] Successful
I0812 14:28:09.114] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:09.114] error: replicationcontrollers "busybox0" pausing is not supported
I0812 14:28:09.114] error: replicationcontrollers "busybox1" pausing is not supported
I0812 14:28:09.114] has:Object 'Kind' is missing
I0812 14:28:09.115] Successful
I0812 14:28:09.116] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:09.116] error: replicationcontrollers "busybox0" pausing is not supported
I0812 14:28:09.116] error: replicationcontrollers "busybox1" pausing is not supported
I0812 14:28:09.116] has:replicationcontrollers "busybox0" pausing is not supported
I0812 14:28:09.118] Successful
I0812 14:28:09.118] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:09.119] error: replicationcontrollers "busybox0" pausing is not supported
I0812 14:28:09.119] error: replicationcontrollers "busybox1" pausing is not supported
I0812 14:28:09.119] has:replicationcontrollers "busybox1" pausing is not supported
W0812 14:28:09.219] E0812 14:28:09.056168   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:09.220] E0812 14:28:09.163214   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:09.302] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0812 14:28:09.318] error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:09.419] Successful
I0812 14:28:09.420] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:09.420] error: replicationcontrollers "busybox0" resuming is not supported
I0812 14:28:09.420] error: replicationcontrollers "busybox1" resuming is not supported
I0812 14:28:09.420] has:Object 'Kind' is missing
I0812 14:28:09.420] Successful
I0812 14:28:09.421] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:09.421] error: replicationcontrollers "busybox0" resuming is not supported
I0812 14:28:09.421] error: replicationcontrollers "busybox1" resuming is not supported
I0812 14:28:09.421] has:replicationcontrollers "busybox0" resuming is not supported
I0812 14:28:09.421] Successful
I0812 14:28:09.421] message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
I0812 14:28:09.422] error: replicationcontrollers "busybox0" resuming is not supported
I0812 14:28:09.422] error: replicationcontrollers "busybox1" resuming is not supported
I0812 14:28:09.422] has:replicationcontrollers "busybox0" resuming is not supported
I0812 14:28:09.422] replicationcontroller "busybox0" force deleted
I0812 14:28:09.422] replicationcontroller "busybox1" force deleted
W0812 14:28:09.833] E0812 14:28:09.833069   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:09.948] E0812 14:28:09.947652   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:10.058] E0812 14:28:10.058245   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:10.166] E0812 14:28:10.165314   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:10.326] Recording: run_namespace_tests
I0812 14:28:10.327] Running command: run_namespace_tests
I0812 14:28:10.353] 
I0812 14:28:10.355] +++ Running case: test-cmd.run_namespace_tests 
I0812 14:28:10.358] +++ working dir: /go/src/k8s.io/kubernetes
I0812 14:28:10.361] +++ command: run_namespace_tests
I0812 14:28:10.373] +++ [0812 14:28:10] Testing kubectl(v1:namespaces)
I0812 14:28:10.458] namespace/my-namespace created
I0812 14:28:10.564] core.sh:1308: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
I0812 14:28:10.644] namespace "my-namespace" deleted
W0812 14:28:10.835] E0812 14:28:10.834801   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:10.950] E0812 14:28:10.949743   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:11.061] E0812 14:28:11.060299   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:11.167] E0812 14:28:11.166786   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:11.837] E0812 14:28:11.836364   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:11.953] E0812 14:28:11.952892   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:12.062] E0812 14:28:12.062178   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:12.169] E0812 14:28:12.168570   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:12.838] E0812 14:28:12.838095   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:12.954] E0812 14:28:12.954289   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:13.064] E0812 14:28:13.063867   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:13.171] E0812 14:28:13.170325   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:13.840] E0812 14:28:13.839879   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:13.956] E0812 14:28:13.955785   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:14.066] E0812 14:28:14.066063   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:14.172] E0812 14:28:14.172153   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:14.842] E0812 14:28:14.841436   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:14.958] E0812 14:28:14.957329   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:15.070] E0812 14:28:15.067856   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:15.174] E0812 14:28:15.173721   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:15.616] I0812 14:28:15.615893   53136 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller
W0812 14:28:15.677] I0812 14:28:15.676958   53136 controller_utils.go:1029] Waiting for caches to sync for resource quota controller
W0812 14:28:15.717] I0812 14:28:15.716515   53136 controller_utils.go:1036] Caches are synced for garbage collector controller
W0812 14:28:15.778] I0812 14:28:15.777775   53136 controller_utils.go:1036] Caches are synced for resource quota controller
W0812 14:28:15.843] E0812 14:28:15.842692   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:15.944] namespace/my-namespace condition met
I0812 14:28:15.944] Successful
I0812 14:28:15.944] message:Error from server (NotFound): namespaces "my-namespace" not found
I0812 14:28:15.944] has: not found
I0812 14:28:15.944] namespace/my-namespace created
I0812 14:28:16.035] core.sh:1317: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
I0812 14:28:16.277] Successful
I0812 14:28:16.278] message:warning: deleting cluster-scoped resources, not scoped to the provided namespace
I0812 14:28:16.278] namespace "kube-node-lease" deleted
... skipping 29 lines ...
I0812 14:28:16.281] namespace "namespace-1565620043-4275" deleted
I0812 14:28:16.281] namespace "namespace-1565620044-17690" deleted
I0812 14:28:16.281] namespace "namespace-1565620046-3997" deleted
I0812 14:28:16.281] namespace "namespace-1565620048-4523" deleted
I0812 14:28:16.281] namespace "namespace-1565620078-11758" deleted
I0812 14:28:16.281] namespace "namespace-1565620078-28763" deleted
I0812 14:28:16.281] Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
I0812 14:28:16.282] Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
I0812 14:28:16.282] Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
I0812 14:28:16.282] has:warning: deleting cluster-scoped resources
I0812 14:28:16.282] Successful
I0812 14:28:16.282] message:warning: deleting cluster-scoped resources, not scoped to the provided namespace
I0812 14:28:16.282] namespace "kube-node-lease" deleted
I0812 14:28:16.282] namespace "my-namespace" deleted
I0812 14:28:16.282] namespace "namespace-1565619944-27846" deleted
... skipping 27 lines ...
I0812 14:28:16.285] namespace "namespace-1565620043-4275" deleted
I0812 14:28:16.286] namespace "namespace-1565620044-17690" deleted
I0812 14:28:16.286] namespace "namespace-1565620046-3997" deleted
I0812 14:28:16.286] namespace "namespace-1565620048-4523" deleted
I0812 14:28:16.286] namespace "namespace-1565620078-11758" deleted
I0812 14:28:16.286] namespace "namespace-1565620078-28763" deleted
I0812 14:28:16.286] Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
I0812 14:28:16.286] Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
I0812 14:28:16.286] Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
I0812 14:28:16.286] has:namespace "my-namespace" deleted
W0812 14:28:16.387] E0812 14:28:15.960164   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:16.387] E0812 14:28:16.069239   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:16.388] E0812 14:28:16.175418   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:16.488] core.sh:1329: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"other\" }}found{{end}}{{end}}:: :
I0812 14:28:16.489] namespace/other created
I0812 14:28:16.584] core.sh:1333: Successful get namespaces/other {{.metadata.name}}: other
I0812 14:28:16.686] core.sh:1337: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:16.860] pod/valid-pod created
I0812 14:28:16.966] core.sh:1341: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:28:17.070] core.sh:1343: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:28:17.160] Successful
I0812 14:28:17.160] message:error: a resource cannot be retrieved by name across all namespaces
I0812 14:28:17.160] has:a resource cannot be retrieved by name across all namespaces
I0812 14:28:17.263] core.sh:1350: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
I0812 14:28:17.352] pod "valid-pod" force deleted
W0812 14:28:17.453] E0812 14:28:16.844359   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:17.454] E0812 14:28:16.961667   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:17.454] E0812 14:28:17.070975   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:17.455] E0812 14:28:17.176874   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:17.455] warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
I0812 14:28:17.556] core.sh:1354: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:17.556] namespace "other" deleted
W0812 14:28:17.847] E0812 14:28:17.846258   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:17.964] E0812 14:28:17.963802   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:18.073] E0812 14:28:18.073002   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:18.179] E0812 14:28:18.178665   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:18.848] E0812 14:28:18.847968   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:18.966] E0812 14:28:18.965239   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:19.075] E0812 14:28:19.074684   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:19.120] I0812 14:28:19.120194   53136 horizontal.go:341] Horizontal Pod Autoscaler busybox0 has been deleted in namespace-1565620078-28763
W0812 14:28:19.125] I0812 14:28:19.124908   53136 horizontal.go:341] Horizontal Pod Autoscaler busybox1 has been deleted in namespace-1565620078-28763
W0812 14:28:19.181] E0812 14:28:19.180443   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:19.850] E0812 14:28:19.849987   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:19.967] E0812 14:28:19.967008   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:20.077] E0812 14:28:20.076270   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:20.182] E0812 14:28:20.182159   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:20.852] E0812 14:28:20.851523   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:20.969] E0812 14:28:20.968228   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:21.079] E0812 14:28:21.078301   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:21.185] E0812 14:28:21.184506   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:21.858] E0812 14:28:21.857033   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:21.970] E0812 14:28:21.969975   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:22.080] E0812 14:28:22.079432   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:22.188] E0812 14:28:22.187234   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:22.672] +++ exit code: 0
I0812 14:28:22.714] Recording: run_secrets_test
I0812 14:28:22.714] Running command: run_secrets_test
I0812 14:28:22.742] 
I0812 14:28:22.744] +++ Running case: test-cmd.run_secrets_test 
I0812 14:28:22.747] +++ working dir: /go/src/k8s.io/kubernetes
... skipping 57 lines ...
I0812 14:28:24.693] core.sh:767: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/tls
I0812 14:28:24.767] secret "test-secret" deleted
I0812 14:28:24.850] secret/test-secret created
I0812 14:28:24.941] core.sh:773: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret
I0812 14:28:25.029] core.sh:774: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/tls
I0812 14:28:25.103] secret "test-secret" deleted
W0812 14:28:25.204] E0812 14:28:22.858582   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.204] E0812 14:28:22.971487   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.204] I0812 14:28:23.015148   70111 loader.go:375] Config loaded from file:  /tmp/tmp.QAZctyhJp0/.kube/config
W0812 14:28:25.205] E0812 14:28:23.081000   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.205] E0812 14:28:23.188935   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.205] E0812 14:28:23.860397   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.206] E0812 14:28:23.973073   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.206] E0812 14:28:24.082448   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.207] E0812 14:28:24.190349   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.207] E0812 14:28:24.861814   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.207] E0812 14:28:24.974758   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.207] E0812 14:28:25.083827   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:25.207] E0812 14:28:25.192194   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:25.308] secret/secret-string-data created
I0812 14:28:25.377] core.sh:796: Successful get secret/secret-string-data --namespace=test-secrets  {{.data}}: map[k1:djE= k2:djI=]
I0812 14:28:25.464] core.sh:797: Successful get secret/secret-string-data --namespace=test-secrets  {{.data}}: map[k1:djE= k2:djI=]
I0812 14:28:25.549] core.sh:798: Successful get secret/secret-string-data --namespace=test-secrets  {{.stringData}}: <no value>
I0812 14:28:25.625] secret "secret-string-data" deleted
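A note on the secret-string-data checks above: a Secret's stringData field is write-only convenience input; the API server base64-encodes it into data and does not persist stringData itself, which is why {{.data}} reads map[k1:djE= k2:djI=] (base64 of "v1" and "v2") while {{.stringData}} renders as <no value>. A plausible sketch of the manifest behind this check, assumed here because the log only shows the stored values, not the file:

    apiVersion: v1
    kind: Secret
    metadata:
      name: secret-string-data
      namespace: test-secrets
    stringData:                   # write-only: persisted under .data as base64, then dropped on read
      k1: v1
      k2: v2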
I0812 14:28:25.726] core.sh:807: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:25.880] secret "test-secret" deleted
I0812 14:28:25.960] namespace "test-secrets" deleted
W0812 14:28:26.061] E0812 14:28:25.863325   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:26.062] E0812 14:28:25.976375   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:26.085] E0812 14:28:26.085196   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:26.194] E0812 14:28:26.193612   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:26.865] E0812 14:28:26.864981   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:26.978] E0812 14:28:26.977840   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:27.087] E0812 14:28:27.087018   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:27.196] E0812 14:28:27.196252   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:27.867] E0812 14:28:27.867122   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:27.980] E0812 14:28:27.979820   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:28.089] E0812 14:28:28.088462   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:28.198] E0812 14:28:28.197716   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:28.870] E0812 14:28:28.869764   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:28.981] E0812 14:28:28.981247   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:29.090] E0812 14:28:29.089911   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:29.199] E0812 14:28:29.199169   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:29.872] E0812 14:28:29.871436   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:29.983] E0812 14:28:29.982609   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:30.092] E0812 14:28:30.091491   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:30.201] E0812 14:28:30.200742   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:30.874] E0812 14:28:30.873303   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:30.984] E0812 14:28:30.983654   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:31.086] +++ exit code: 0
I0812 14:28:31.129] Recording: run_configmap_tests
I0812 14:28:31.129] Running command: run_configmap_tests
I0812 14:28:31.150] 
I0812 14:28:31.152] +++ Running case: test-cmd.run_configmap_tests 
I0812 14:28:31.154] +++ working dir: /go/src/k8s.io/kubernetes
I0812 14:28:31.157] +++ command: run_configmap_tests
I0812 14:28:31.169] +++ [0812 14:28:31] Creating namespace namespace-1565620111-6915
I0812 14:28:31.247] namespace/namespace-1565620111-6915 created
I0812 14:28:31.322] Context "test" modified.
I0812 14:28:31.327] +++ [0812 14:28:31] Testing configmaps
W0812 14:28:31.427] E0812 14:28:31.093681   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:31.428] E0812 14:28:31.202184   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:31.529] configmap/test-configmap created
I0812 14:28:31.621] core.sh:28: Successful get configmap/test-configmap {{.metadata.name}}: test-configmap
I0812 14:28:31.717] configmap "test-configmap" deleted
I0812 14:28:31.818] core.sh:33: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-configmaps\" }}found{{end}}{{end}}:: :
I0812 14:28:31.891] namespace/test-configmaps created
I0812 14:28:31.993] core.sh:37: Successful get namespaces/test-configmaps {{.metadata.name}}: test-configmaps
... skipping 3 lines ...
I0812 14:28:32.328] configmap/test-binary-configmap created
I0812 14:28:32.426] core.sh:48: Successful get configmap/test-configmap --namespace=test-configmaps {{.metadata.name}}: test-configmap
I0812 14:28:32.513] core.sh:49: Successful get configmap/test-binary-configmap --namespace=test-configmaps {{.metadata.name}}: test-binary-configmap
I0812 14:28:32.772] configmap "test-configmap" deleted
I0812 14:28:32.858] configmap "test-binary-configmap" deleted
I0812 14:28:32.938] namespace "test-configmaps" deleted
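For reference, the configmap assertions above reduce to a short kubectl round trip inside a throwaway namespace. A rough sketch (the --from-literal data is a placeholder; the harness drives this through its assertion helpers rather than these literal commands):

$ kubectl create namespace test-configmaps
$ kubectl create configmap test-configmap --namespace=test-configmaps --from-literal=key=value
$ kubectl get configmap/test-configmap --namespace=test-configmaps -o go-template='{{.metadata.name}}'
$ kubectl delete configmap test-configmap test-binary-configmap --namespace=test-configmaps
$ kubectl delete namespace test-configmaps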
W0812 14:28:33.039] E0812 14:28:31.875093   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:33.040] E0812 14:28:31.985105   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:33.041] E0812 14:28:32.095417   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:33.041] E0812 14:28:32.203644   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:33.041] E0812 14:28:32.876468   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:33.042] E0812 14:28:32.986494   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:33.097] E0812 14:28:33.096954   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:33.205] E0812 14:28:33.204919   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:33.879] E0812 14:28:33.878229   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:33.988] E0812 14:28:33.987987   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:34.099] E0812 14:28:34.098769   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:34.207] E0812 14:28:34.206611   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:34.880] E0812 14:28:34.880039   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:34.990] E0812 14:28:34.989734   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:35.101] E0812 14:28:35.100405   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:35.209] E0812 14:28:35.208445   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:35.882] E0812 14:28:35.882041   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:35.993] E0812 14:28:35.992682   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:36.103] E0812 14:28:36.102255   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:36.210] E0812 14:28:36.210110   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:36.884] E0812 14:28:36.883710   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:36.995] E0812 14:28:36.994440   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:37.104] E0812 14:28:37.103884   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:37.212] E0812 14:28:37.211897   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:37.886] E0812 14:28:37.885535   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:37.996] E0812 14:28:37.995632   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:38.097] +++ exit code: 0
I0812 14:28:38.100] Recording: run_client_config_tests
I0812 14:28:38.100] Running command: run_client_config_tests
I0812 14:28:38.128] 
I0812 14:28:38.131] +++ Running case: test-cmd.run_client_config_tests 
I0812 14:28:38.134] +++ working dir: /go/src/k8s.io/kubernetes
I0812 14:28:38.137] +++ command: run_client_config_tests
I0812 14:28:38.153] +++ [0812 14:28:38] Creating namespace namespace-1565620118-2378
I0812 14:28:38.233] namespace/namespace-1565620118-2378 created
I0812 14:28:38.308] Context "test" modified.
I0812 14:28:38.317] +++ [0812 14:28:38] Testing client config
I0812 14:28:38.392] Successful
I0812 14:28:38.392] message:error: stat missing: no such file or directory
I0812 14:28:38.392] has:missing: no such file or directory
I0812 14:28:38.479] Successful
I0812 14:28:38.480] message:error: stat missing: no such file or directory
I0812 14:28:38.480] has:missing: no such file or directory
I0812 14:28:38.558] Successful
I0812 14:28:38.559] message:error: stat missing: no such file or directory
I0812 14:28:38.559] has:missing: no such file or directory
I0812 14:28:38.639] Successful
I0812 14:28:38.640] message:Error in configuration: context was not found for specified context: missing-context
I0812 14:28:38.640] has:context was not found for specified context: missing-context
I0812 14:28:38.717] Successful
I0812 14:28:38.717] message:error: no server found for cluster "missing-cluster"
I0812 14:28:38.718] has:no server found for cluster "missing-cluster"
I0812 14:28:38.795] Successful
I0812 14:28:38.796] message:error: auth info "missing-user" does not exist
I0812 14:28:38.796] has:auth info "missing-user" does not exist
W0812 14:28:38.897] E0812 14:28:38.106106   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:38.898] E0812 14:28:38.213488   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:38.898] E0812 14:28:38.887266   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:38.998] E0812 14:28:38.997656   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:39.099] Successful
I0812 14:28:39.100] message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
I0812 14:28:39.100] has:error loading config file
I0812 14:28:39.100] Successful
I0812 14:28:39.100] message:error: stat missing-config: no such file or directory
I0812 14:28:39.100] has:no such file or directory
I0812 14:28:39.100] +++ exit code: 0
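The run_client_config_tests case creates no objects; it only checks kubectl's error paths for bad client configuration. The failing invocations are roughly of this shape (the "missing*" names are the deliberately nonexistent values the test passes, and the resource being listed is assumed here):

$ kubectl get pods --kubeconfig=missing                # stat missing: no such file or directory
$ kubectl get pods --context=missing-context           # context was not found for specified context
$ kubectl get pods --cluster=missing-cluster           # no server found for cluster "missing-cluster"
$ kubectl get pods --user=missing-user                 # auth info "missing-user" does not exist
$ kubectl get pods --kubeconfig=/tmp/newconfig.yaml    # rejected: no kind "Config" registered for version "v-1"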
I0812 14:28:39.100] Recording: run_service_accounts_tests
I0812 14:28:39.101] Running command: run_service_accounts_tests
I0812 14:28:39.117] 
I0812 14:28:39.119] +++ Running case: test-cmd.run_service_accounts_tests 
... skipping 7 lines ...
I0812 14:28:39.498] namespace/test-service-accounts created
I0812 14:28:39.603] core.sh:832: Successful get namespaces/test-service-accounts {{.metadata.name}}: test-service-accounts
I0812 14:28:39.685] serviceaccount/test-service-account created
I0812 14:28:39.788] core.sh:838: Successful get serviceaccount/test-service-account --namespace=test-service-accounts {{.metadata.name}}: test-service-account
I0812 14:28:39.875] serviceaccount "test-service-account" deleted
I0812 14:28:39.970] namespace "test-service-accounts" deleted
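run_service_accounts_tests is the same create/get/delete cycle for a ServiceAccount, scoped to its own namespace; approximately:

$ kubectl create namespace test-service-accounts
$ kubectl create serviceaccount test-service-account --namespace=test-service-accounts
$ kubectl get serviceaccount/test-service-account --namespace=test-service-accounts -o go-template='{{.metadata.name}}'
$ kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
$ kubectl delete namespace test-service-accounts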
W0812 14:28:40.071] E0812 14:28:39.107813   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:40.071] E0812 14:28:39.214907   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:40.071] E0812 14:28:39.889376   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:40.072] E0812 14:28:39.999336   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:40.111] E0812 14:28:40.110051   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:40.217] E0812 14:28:40.216542   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:40.892] E0812 14:28:40.891360   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:41.001] E0812 14:28:41.001033   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:41.113] E0812 14:28:41.112515   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:41.218] E0812 14:28:41.218121   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:41.894] E0812 14:28:41.893141   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:42.003] E0812 14:28:42.002845   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:42.115] E0812 14:28:42.114392   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:42.220] E0812 14:28:42.219848   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:42.896] E0812 14:28:42.895510   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:43.005] E0812 14:28:43.004602   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:43.117] E0812 14:28:43.116263   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:43.222] E0812 14:28:43.221976   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:43.898] E0812 14:28:43.898155   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:44.007] E0812 14:28:44.006455   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:44.118] E0812 14:28:44.118111   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:44.224] E0812 14:28:44.223858   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:44.900] E0812 14:28:44.899804   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:45.008] E0812 14:28:45.007990   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:45.109] +++ exit code: 0
I0812 14:28:45.152] Recording: run_job_tests
I0812 14:28:45.152] Running command: run_job_tests
I0812 14:28:45.177] 
I0812 14:28:45.179] +++ Running case: test-cmd.run_job_tests 
I0812 14:28:45.182] +++ working dir: /go/src/k8s.io/kubernetes
... skipping 3 lines ...
I0812 14:28:45.360] Context "test" modified.
I0812 14:28:45.370] +++ [0812 14:28:45] Testing job
I0812 14:28:45.472] batch.sh:30: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-jobs\" }}found{{end}}{{end}}:: :
I0812 14:28:45.548] namespace/test-jobs created
I0812 14:28:45.656] batch.sh:34: Successful get namespaces/test-jobs {{.metadata.name}}: test-jobs
I0812 14:28:45.752] cronjob.batch/pi created
W0812 14:28:45.853] E0812 14:28:45.119671   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:45.853] E0812 14:28:45.225427   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:45.854] kubectl run --generator=cronjob/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0812 14:28:45.902] E0812 14:28:45.901662   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:46.003] batch.sh:39: Successful get cronjob/pi --namespace=test-jobs {{.metadata.name}}: pi
I0812 14:28:46.004] NAME   SCHEDULE       SUSPEND   ACTIVE   LAST SCHEDULE   AGE
I0812 14:28:46.004] pi     59 23 31 2 *   False     0        <none>          0s
I0812 14:28:46.048] Name:                          pi
I0812 14:28:46.048] Namespace:                     test-jobs
I0812 14:28:46.049] Labels:                        run=pi
I0812 14:28:46.049] Annotations:                   <none>
I0812 14:28:46.049] Schedule:                      59 23 31 2 *
I0812 14:28:46.049] Concurrency Policy:            Allow
I0812 14:28:46.050] Suspend:                       False
I0812 14:28:46.050] Successful Job History Limit:  3
I0812 14:28:46.050] Failed Job History Limit:      1
I0812 14:28:46.050] Starting Deadline Seconds:     <unset>
I0812 14:28:46.050] Selector:                      <unset>
I0812 14:28:46.050] Parallelism:                   <unset>
I0812 14:28:46.050] Completions:                   <unset>
I0812 14:28:46.050] Pod Template:
I0812 14:28:46.050]   Labels:  run=pi
... skipping 18 lines ...
I0812 14:28:46.052] Events:              <none>
I0812 14:28:46.150] Successful
I0812 14:28:46.150] message:job.batch/test-job
I0812 14:28:46.150] has:job.batch/test-job
I0812 14:28:46.252] batch.sh:48: Successful get jobs {{range.items}}{{.metadata.name}}{{end}}: 
I0812 14:28:46.361] job.batch/test-job created
W0812 14:28:46.462] E0812 14:28:46.009862   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:46.463] E0812 14:28:46.121330   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:46.463] E0812 14:28:46.227255   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:46.463] I0812 14:28:46.357036   53136 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"test-jobs", Name:"test-job", UID:"827116ae-8e5a-4f1f-be4e-85dfa332fed2", APIVersion:"batch/v1", ResourceVersion:"1350", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-dk7fk
I0812 14:28:46.564] batch.sh:53: Successful get job/test-job --namespace=test-jobs {{.metadata.name}}: test-job
I0812 14:28:46.565] NAME       COMPLETIONS   DURATION   AGE
I0812 14:28:46.565] test-job   0/1           0s         0s
I0812 14:28:46.661] Name:           test-job
I0812 14:28:46.661] Namespace:      test-jobs
... skipping 3 lines ...
I0812 14:28:46.662]                 run=pi
I0812 14:28:46.662] Annotations:    cronjob.kubernetes.io/instantiate: manual
I0812 14:28:46.662] Controlled By:  CronJob/pi
I0812 14:28:46.662] Parallelism:    1
I0812 14:28:46.662] Completions:    1
I0812 14:28:46.662] Start Time:     Mon, 12 Aug 2019 14:28:46 +0000
I0812 14:28:46.663] Pods Statuses:  1 Running / 0 Succeeded / 0 Failed
I0812 14:28:46.663] Pod Template:
I0812 14:28:46.663]   Labels:  controller-uid=827116ae-8e5a-4f1f-be4e-85dfa332fed2
I0812 14:28:46.663]            job-name=test-job
I0812 14:28:46.663]            run=pi
I0812 14:28:46.663]   Containers:
I0812 14:28:46.663]    pi:
... skipping 15 lines ...
I0812 14:28:46.666]   Type    Reason            Age   From            Message
I0812 14:28:46.666]   ----    ------            ----  ----            -------
I0812 14:28:46.666]   Normal  SuccessfulCreate  0s    job-controller  Created pod: test-job-dk7fk
I0812 14:28:46.753] job.batch "test-job" deleted
I0812 14:28:46.844] cronjob.batch "pi" deleted
I0812 14:28:46.934] namespace "test-jobs" deleted
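The job case drives a CronJob through the deprecated run generator (hence the DEPRECATED warning above) and then instantiates a Job from it, which is why the resulting Job carries the cronjob.kubernetes.io/instantiate: manual annotation and is Controlled By CronJob/pi. Approximately (the pi command line and --restart value are assumptions, not shown in this log):

$ kubectl run pi --generator=cronjob/v1beta1 --schedule="59 23 31 2 *" --restart=OnFailure \
    --image=k8s.gcr.io/perl --namespace=test-jobs -- perl -Mbignum=bpi -wle 'print bpi(10)'
$ kubectl create job test-job --from=cronjob/pi --namespace=test-jobs
$ kubectl delete job test-job --namespace=test-jobs
$ kubectl delete cronjob pi --namespace=test-jobs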
W0812 14:28:47.035] E0812 14:28:46.903195   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:47.035] E0812 14:28:47.011709   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:47.124] E0812 14:28:47.123340   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:47.230] E0812 14:28:47.229419   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:47.906] E0812 14:28:47.905101   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:48.014] E0812 14:28:48.013540   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:48.126] E0812 14:28:48.125130   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:48.232] E0812 14:28:48.231473   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:48.908] E0812 14:28:48.907211   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:49.016] E0812 14:28:49.015326   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:49.127] E0812 14:28:49.126936   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:49.233] E0812 14:28:49.233210   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:49.909] E0812 14:28:49.909132   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:50.017] E0812 14:28:50.017159   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:50.129] E0812 14:28:50.128866   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:50.235] E0812 14:28:50.235048   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:50.912] E0812 14:28:50.911227   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:51.019] E0812 14:28:51.019087   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:51.132] E0812 14:28:51.131294   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:51.237] E0812 14:28:51.236681   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:51.913] E0812 14:28:51.912909   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:52.021] E0812 14:28:52.020340   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:52.121] +++ exit code: 0
I0812 14:28:52.122] Recording: run_create_job_tests
I0812 14:28:52.122] Running command: run_create_job_tests
I0812 14:28:52.145] 
I0812 14:28:52.149] +++ Running case: test-cmd.run_create_job_tests 
I0812 14:28:52.154] +++ working dir: /go/src/k8s.io/kubernetes
I0812 14:28:52.157] +++ command: run_create_job_tests
I0812 14:28:52.174] +++ [0812 14:28:52] Creating namespace namespace-1565620132-30071
I0812 14:28:52.260] namespace/namespace-1565620132-30071 created
I0812 14:28:52.343] Context "test" modified.
I0812 14:28:52.443] job.batch/test-job created
I0812 14:28:52.549] create.sh:86: Successful get job test-job {{(index .spec.template.spec.containers 0).image}}: k8s.gcr.io/nginx:test-cmd
I0812 14:28:52.638] job.batch "test-job" deleted
W0812 14:28:52.739] E0812 14:28:52.132750   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:52.740] E0812 14:28:52.238512   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:52.740] I0812 14:28:52.437956   53136 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1565620132-30071", Name:"test-job", UID:"1ff6d1c7-ef35-4c3d-9b0f-9c5306090a6b", APIVersion:"batch/v1", ResourceVersion:"1368", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-98wjj
W0812 14:28:52.741] I0812 14:28:52.739985   53136 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1565620132-30071", Name:"test-job-pi", UID:"a984f58b-abbc-4ed2-b1dd-dd250a583cf6", APIVersion:"batch/v1", ResourceVersion:"1375", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-pi-kg4p7
I0812 14:28:52.841] job.batch/test-job-pi created
I0812 14:28:52.858] create.sh:92: Successful get job test-job-pi {{(index .spec.template.spec.containers 0).image}}: k8s.gcr.io/perl
I0812 14:28:52.945] job.batch "test-job-pi" deleted
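create.sh:86 and create.sh:92 only assert the container image of Jobs created directly on the command line, e.g.:

$ kubectl create job test-job --image=k8s.gcr.io/nginx:test-cmd
$ kubectl create job test-job-pi --image=k8s.gcr.io/perl -- perl -Mbignum=bpi -wle 'print bpi(10)'
$ kubectl get job test-job-pi -o go-template='{{(index .spec.template.spec.containers 0).image}}'
$ kubectl delete job test-job test-job-pi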
W0812 14:28:53.046] E0812 14:28:52.914687   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:53.046] E0812 14:28:53.022033   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:53.046] kubectl run --generator=cronjob/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0812 14:28:53.135] E0812 14:28:53.134176   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:53.149] I0812 14:28:53.148368   53136 event.go:255] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1565620132-30071", Name:"my-pi", UID:"06ec00c8-ec49-48e7-93d8-9a78aa8e0edc", APIVersion:"batch/v1", ResourceVersion:"1384", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: my-pi-2qwsv
W0812 14:28:53.240] E0812 14:28:53.240067   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:53.341] cronjob.batch/test-pi created
I0812 14:28:53.342] job.batch/my-pi created
I0812 14:28:53.342] Successful
I0812 14:28:53.342] message:[perl -Mbignum=bpi -wle print bpi(10)]
I0812 14:28:53.342] has:perl -Mbignum=bpi -wle print bpi(10)
I0812 14:28:53.349] job.batch "my-pi" deleted
... skipping 8 lines ...
I0812 14:28:53.559] +++ [0812 14:28:53] Creating namespace namespace-1565620133-10942
I0812 14:28:53.639] namespace/namespace-1565620133-10942 created
I0812 14:28:53.719] Context "test" modified.
I0812 14:28:53.728] +++ [0812 14:28:53] Testing pod templates
I0812 14:28:53.829] core.sh:1415: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:28:53.999] podtemplate/nginx created
W0812 14:28:54.099] E0812 14:28:53.916402   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:54.100] I0812 14:28:53.995894   49693 controller.go:606] quota admission added evaluator for: podtemplates
W0812 14:28:54.100] E0812 14:28:54.024393   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:54.141] E0812 14:28:54.141045   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:54.242] E0812 14:28:54.241628   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:54.343] core.sh:1419: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx:
I0812 14:28:54.343] NAME    CONTAINERS   IMAGES   POD LABELS
I0812 14:28:54.344] nginx   nginx        nginx    name=nginx
I0812 14:28:54.404] core.sh:1427: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx:
I0812 14:28:54.483] podtemplate "nginx" deleted
I0812 14:28:54.591] core.sh:1431: Successful get podtemplate {{range.items}}{{.metadata.name}}:{{end}}: 
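The podtemplate case creates the nginx PodTemplate from a repo fixture file. A minimal stand-in manifest consistent with the NAME/CONTAINERS/IMAGES/POD LABELS columns above would look like this (the fixture's exact contents are not part of this log):

$ kubectl create -f - <<'EOF'
apiVersion: v1
kind: PodTemplate
metadata:
  name: nginx
template:
  metadata:
    labels:
      name: nginx
  spec:
    containers:
    - name: nginx
      image: nginx
EOF
$ kubectl get podtemplates
$ kubectl delete podtemplate nginx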
... skipping 5 lines ...
I0812 14:28:54.676] +++ working dir: /go/src/k8s.io/kubernetes
I0812 14:28:54.679] +++ command: run_service_tests
I0812 14:28:54.761] Context "test" modified.
I0812 14:28:54.770] +++ [0812 14:28:54] Testing kubectl(v1:services)
I0812 14:28:54.868] core.sh:858: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0812 14:28:55.029] service/redis-master created
W0812 14:28:55.131] E0812 14:28:54.918016   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:55.132] E0812 14:28:55.025696   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:55.143] E0812 14:28:55.142522   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:55.243] E0812 14:28:55.243232   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:55.344] core.sh:862: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
I0812 14:28:55.345] core.sh:864: Successful describe services redis-master:
I0812 14:28:55.345] Name:              redis-master
I0812 14:28:55.345] Namespace:         default
I0812 14:28:55.345] Labels:            app=redis
I0812 14:28:55.345]                    role=master
... skipping 240 lines ...
I0812 14:28:56.389]     role: padawan
I0812 14:28:56.389]   sessionAffinity: None
I0812 14:28:56.389]   type: ClusterIP
I0812 14:28:56.390] status:
I0812 14:28:56.390]   loadBalancer: {}
I0812 14:28:56.477] service/redis-master selector updated
W0812 14:28:56.579] E0812 14:28:55.921155   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:56.580] E0812 14:28:56.027456   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:56.580] E0812 14:28:56.144344   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:56.580] E0812 14:28:56.245093   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:56.681] core.sh:890: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: padawan:
I0812 14:28:56.681] service/redis-master selector updated
I0812 14:28:56.766] core.sh:894: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend:
I0812 14:28:56.846] apiVersion: v1
I0812 14:28:56.846] kind: Service
I0812 14:28:56.846] metadata:
... skipping 49 lines ...
I0812 14:28:56.850]   selector:
I0812 14:28:56.850]     role: padawan
I0812 14:28:56.850]   sessionAffinity: None
I0812 14:28:56.850]   type: ClusterIP
I0812 14:28:56.850] status:
I0812 14:28:56.850]   loadBalancer: {}
W0812 14:28:56.951] error: you must specify resources by --filename when --local is set.
W0812 14:28:56.951] Example resource specifications include:
W0812 14:28:56.952]    '-f rsrc.yaml'
W0812 14:28:56.952]    '--filename=rsrc.json'
W0812 14:28:56.952] E0812 14:28:56.922741   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:57.029] E0812 14:28:57.028946   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:57.130] core.sh:898: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend:
I0812 14:28:57.204] core.sh:905: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
I0812 14:28:57.290] service "redis-master" deleted
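The core.sh:890-905 checks exercise kubectl set selector against the redis-master Service, both server-side and with --local; the "you must specify resources by --filename when --local is set." error above is the expected failure when --local is used without -f. A sketch, with the selector keys assumed from the usual guestbook redis-master manifest and a placeholder filename:

$ kubectl set selector services redis-master role=padawan
$ kubectl set selector services redis-master app=redis,role=master,tier=backend
$ kubectl set selector -f redis-master-service.yaml role=padawan --local -o yaml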
W0812 14:28:57.391] E0812 14:28:57.146048   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:57.392] E0812 14:28:57.246817   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:57.492] core.sh:912: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0812 14:28:57.503] core.sh:916: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0812 14:28:57.678] service/redis-master created
I0812 14:28:57.786] core.sh:920: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
I0812 14:28:57.886] core.sh:924: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
I0812 14:28:58.056] service/service-v1-test created
W0812 14:28:58.157] E0812 14:28:57.924326   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:58.157] E0812 14:28:58.030572   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:58.158] E0812 14:28:58.147536   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:58.249] E0812 14:28:58.248650   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:58.350] core.sh:945: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test:
I0812 14:28:58.351] service/service-v1-test replaced
I0812 14:28:58.451] core.sh:952: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test:
I0812 14:28:58.544] service "redis-master" deleted
I0812 14:28:58.645] service "service-v1-test" deleted
I0812 14:28:58.758] core.sh:960: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0812 14:28:58.859] core.sh:964: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0812 14:28:59.023] service/redis-master created
W0812 14:28:59.124] E0812 14:28:58.925940   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:59.124] E0812 14:28:59.032364   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:28:59.149] E0812 14:28:59.149100   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:59.250] service/redis-slave created
I0812 14:28:59.330] core.sh:969: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:redis-slave:
I0812 14:28:59.428] Successful
I0812 14:28:59.429] message:NAME           RSRC
I0812 14:28:59.429] kubernetes     144
I0812 14:28:59.429] redis-master   1417
I0812 14:28:59.429] redis-slave    1423
I0812 14:28:59.429] has:redis-master
W0812 14:28:59.530] E0812 14:28:59.250381   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:28:59.630] core.sh:979: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:redis-slave:
I0812 14:28:59.631] service "redis-master" deleted
I0812 14:28:59.636] service "redis-slave" deleted
I0812 14:28:59.748] core.sh:986: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0812 14:28:59.851] core.sh:990: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0812 14:28:59.937] service/beep-boop created
W0812 14:29:00.038] E0812 14:28:59.928334   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:00.038] E0812 14:29:00.033846   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:29:00.139] core.sh:994: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: beep-boop:kubernetes:
I0812 14:29:00.161] core.sh:998: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: beep-boop:kubernetes:
I0812 14:29:00.253] service "beep-boop" deleted
W0812 14:29:00.354] E0812 14:29:00.150986   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:00.355] E0812 14:29:00.253737   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:29:00.456] core.sh:1005: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0812 14:29:00.469] core.sh:1009: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:29:00.588] service/testmetadata created
I0812 14:29:00.588] deployment.apps/testmetadata created
W0812 14:29:00.689] kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
W0812 14:29:00.689] I0812 14:29:00.571829   53136 event.go:255] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"testmetadata", UID:"f18d2a00-8e36-440a-986b-84f6a23b4446", APIVersion:"apps/v1", ResourceVersion:"1435", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set testmetadata-6cdd84c77d to 2
... skipping 2 lines ...
I0812 14:29:00.791] core.sh:1013: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: testmetadata:
I0812 14:29:00.802] core.sh:1014: Successful get service testmetadata {{.metadata.annotations}}: map[zone-context:home]
I0812 14:29:00.903] service/exposemetadata exposed
I0812 14:29:01.010] core.sh:1020: Successful get service exposemetadata {{.metadata.annotations}}: map[zone-context:work]
I0812 14:29:01.100] service "exposemetadata" deleted
I0812 14:29:01.110] service "testmetadata" deleted
W0812 14:29:01.211] E0812 14:29:00.930390   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:01.211] E0812 14:29:01.035478   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:01.211] E0812 14:29:01.152707   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:01.256] E0812 14:29:01.255267   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:29:01.356] deployment.apps "testmetadata" deleted
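testmetadata comes from the deprecated deployment generator of kubectl run (see the warning above), is created together with a service carrying the zone-context annotation, and is then exposed a second time as exposemetadata. A rough equivalent that ignores the annotation plumbing (image and port values here are placeholders, not taken from this log):

$ kubectl run testmetadata --generator=deployment/apps.v1 --image=k8s.gcr.io/pause:2.0 --replicas=2 --port=80 --expose
$ kubectl expose service testmetadata --name=exposemetadata --port=80 --target-port=80
$ kubectl delete service exposemetadata testmetadata
$ kubectl delete deployment testmetadata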
I0812 14:29:01.357] +++ exit code: 0
I0812 14:29:01.357] Recording: run_daemonset_tests
I0812 14:29:01.357] Running command: run_daemonset_tests
I0812 14:29:01.357] 
I0812 14:29:01.357] +++ Running case: test-cmd.run_daemonset_tests 
... skipping 4 lines ...
I0812 14:29:01.515] Context "test" modified.
I0812 14:29:01.524] +++ [0812 14:29:01] Testing kubectl(v1:daemonsets)
I0812 14:29:01.629] apps.sh:30: Successful get daemonsets {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:29:01.826] daemonset.apps/bind created
W0812 14:29:01.927] I0812 14:29:01.822383   49693 controller.go:606] quota admission added evaluator for: daemonsets.apps
W0812 14:29:01.927] I0812 14:29:01.835509   49693 controller.go:606] quota admission added evaluator for: controllerrevisions.apps
W0812 14:29:01.935] E0812 14:29:01.934804   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:29:02.036] apps.sh:34: Successful get daemonsets bind {{.metadata.generation}}: 1
I0812 14:29:02.131] daemonset.apps/bind configured
W0812 14:29:02.232] E0812 14:29:02.037076   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:02.232] E0812 14:29:02.154413   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:02.257] E0812 14:29:02.256782   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:29:02.358] apps.sh:37: Successful get daemonsets bind {{.metadata.generation}}: 1
I0812 14:29:02.358] daemonset.apps/bind image updated
I0812 14:29:02.457] apps.sh:40: Successful get daemonsets bind {{.metadata.generation}}: 2
I0812 14:29:02.555] daemonset.apps/bind env updated
I0812 14:29:02.663] apps.sh:42: Successful get daemonsets bind {{.metadata.generation}}: 3
I0812 14:29:02.764] daemonset.apps/bind resource requirements updated
I0812 14:29:02.873] apps.sh:44: Successful get daemonsets bind {{.metadata.generation}}: 4
I0812 14:29:02.979] daemonset.apps/bind restarted
W0812 14:29:03.081] E0812 14:29:02.936735   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:03.081] E0812 14:29:03.038813   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:03.157] E0812 14:29:03.156378   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:29:03.258] apps.sh:48: Successful get daemonsets bind {{.metadata.generation}}: 5
I0812 14:29:03.258] daemonset.apps "bind" deleted
I0812 14:29:03.258] +++ exit code: 0
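Each generation bump asserted by apps.sh:34-48 corresponds to one mutation of the bind DaemonSet's pod template, followed by a rollout restart. In kubectl terms the sequence is approximately the following (the container name kubernetes-pause matches the bind manifest shown in the history test below; the concrete image, env, and resource values are assumptions):

$ kubectl set image daemonsets/bind kubernetes-pause=k8s.gcr.io/pause:latest
$ kubectl set env daemonsets/bind DEMO=value
$ kubectl set resources daemonsets/bind --limits=cpu=200m,memory=512Mi
$ kubectl rollout restart daemonset/bind
$ kubectl get daemonsets bind -o go-template='{{.metadata.generation}}'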
I0812 14:29:03.259] Recording: run_daemonset_history_tests
I0812 14:29:03.259] Running command: run_daemonset_history_tests
I0812 14:29:03.282] 
... skipping 3 lines ...
I0812 14:29:03.307] +++ [0812 14:29:03] Creating namespace namespace-1565620143-5469
I0812 14:29:03.390] namespace/namespace-1565620143-5469 created
I0812 14:29:03.462] Context "test" modified.
I0812 14:29:03.470] +++ [0812 14:29:03] Testing kubectl(v1:daemonsets, v1:controllerrevisions)
I0812 14:29:03.568] apps.sh:66: Successful get daemonsets {{range.items}}{{.metadata.name}}:{{end}}: 
I0812 14:29:03.751] daemonset.apps/bind created
W0812 14:29:03.852] E0812 14:29:03.258651   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:03.940] E0812 14:29:03.939435   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:29:04.041] apps.sh:70: Successful get controllerrevisions {{range.items}}{{.metadata.annotations}}:{{end}}: map[deprecated.daemonset.template.generation:1 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1565620143-5469"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s.gcr.io/pause:2.0","name":"kubernetes-pause"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}}
I0812 14:29:04.042]  kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true]:
I0812 14:29:04.042] daemonset.apps/bind skipped rollback (current template already matches revision 1)
I0812 14:29:04.089] apps.sh:73: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0:
I0812 14:29:04.196] apps.sh:74: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
I0812 14:29:04.376] daemonset.apps/bind configured
W0812 14:29:04.477] E0812 14:29:04.041091   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:04.478] E0812 14:29:04.158121   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 14:29:04.478] E0812 14:29:04.260192   53136 reflector.go:125] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 14:29:04.579] apps.sh:77: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest:
I0812 14:29:04.595] apps.sh:78: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
I0812 14:29:04.698] apps.sh:79: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2
I0812 14:29:04.807] apps.sh:80: Successful get controllerrevisions {{range.items}}{{.metadata.annotations}}:{{end}}: map[deprecated.daemonset.template.generation:2 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1565620143-5469"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s