Result: FAILURE
Tests: 1 failed / 675 succeeded
Started: 2019-11-09 03:17
Elapsed: 30m4s
Revision:
Builder: gke-prow-ssd-pool-1a225945-5dqn
Refs: release-1.14:f2439bda, 82673:46a52cc9, 84317:e68467d5
resultstore: https://source.cloud.google.com/results/invocations/b4599208-2374-40ec-bd1d-0eefd193b2a1/targets/test
infra-commit: cf0351102
pod: 4c5b25b2-029f-11ea-a29f-7ef3f345d1b5
repo: k8s.io/kubernetes
repo-commit: 3612fecdcbebefcf998d227e22ae435a4377525e
repos: k8s.io/kubernetes = release-1.14:f2439bdab674a59a596dc49de2b94d42aa77bd8c, 82673:46a52cc96ac26311c8a91ab6aae866736b563bf6, 84317:e68467d55a792837ffa53ada0e26ab205c83120b

Test Failures


k8s.io/kubernetes/test/integration/scheduler TestPreemptionRaces 32s

go test -v k8s.io/kubernetes/test/integration/scheduler -run TestPreemptionRaces$
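The repro command above expects a local etcd reachable at http://127.0.0.1:2379; the storagebackend.Config dumps in the log below show the test server dialing exactly that address. A minimal local-run sketch, assuming etcd is installed and k8s.io/kubernetes is checked out at the release-1.14 commit listed above (the data directory path is an arbitrary example):

# Sketch only: start a throwaway etcd on the address the test expects,
# then run just the failing test. These are standard etcd flags.
etcd --data-dir /tmp/etcd-test \
     --listen-client-urls http://127.0.0.1:2379 \
     --advertise-client-urls http://127.0.0.1:2379 &

go test -v k8s.io/kubernetes/test/integration/scheduler -run 'TestPreemptionRaces$'

# Stop the background etcd when done.
kill %1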
I1109 03:38:47.750504  106211 services.go:33] Network range for service cluster IPs is unspecified. Defaulting to {10.0.0.0 ffffff00}.
I1109 03:38:47.750533  106211 services.go:45] Setting service IP to "10.0.0.1" (read-write).
I1109 03:38:47.750544  106211 master.go:277] Node port range unspecified. Defaulting to 30000-32767.
I1109 03:38:47.750554  106211 master.go:233] Using reconciler: 
I1109 03:38:47.752840  106211 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.752952  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.752991  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.753033  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.753117  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.753957  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.754000  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.754156  106211 store.go:1319] Monitoring podtemplates count at <storage-prefix>//podtemplates
I1109 03:38:47.754200  106211 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.754235  106211 reflector.go:161] Listing and watching *core.PodTemplate from storage/cacher.go:/podtemplates
I1109 03:38:47.754465  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.754492  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.754531  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.754618  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.755009  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.755084  106211 store.go:1319] Monitoring events count at <storage-prefix>//events
I1109 03:38:47.755160  106211 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.755329  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.755374  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.755425  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.755516  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.755717  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.756073  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.756332  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.756556  106211 store.go:1319] Monitoring limitranges count at <storage-prefix>//limitranges
I1109 03:38:47.756564  106211 reflector.go:161] Listing and watching *core.LimitRange from storage/cacher.go:/limitranges
I1109 03:38:47.756589  106211 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.756673  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.756684  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.756751  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.756796  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.756987  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.757072  106211 store.go:1319] Monitoring resourcequotas count at <storage-prefix>//resourcequotas
I1109 03:38:47.757324  106211 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.757412  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.757425  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.757454  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.757488  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.757516  106211 reflector.go:161] Listing and watching *core.ResourceQuota from storage/cacher.go:/resourcequotas
I1109 03:38:47.757725  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.757990  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.758095  106211 store.go:1319] Monitoring secrets count at <storage-prefix>//secrets
I1109 03:38:47.758344  106211 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.758407  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.758424  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.758491  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.758545  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.758573  106211 reflector.go:161] Listing and watching *core.Secret from storage/cacher.go:/secrets
I1109 03:38:47.758718  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.759021  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.759125  106211 store.go:1319] Monitoring persistentvolumes count at <storage-prefix>//persistentvolumes
I1109 03:38:47.759316  106211 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.759374  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.759383  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.759409  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.759450  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.759475  106211 reflector.go:161] Listing and watching *core.PersistentVolume from storage/cacher.go:/persistentvolumes
I1109 03:38:47.759679  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.760002  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.760074  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.760189  106211 store.go:1319] Monitoring persistentvolumeclaims count at <storage-prefix>//persistentvolumeclaims
I1109 03:38:47.760271  106211 reflector.go:161] Listing and watching *core.PersistentVolumeClaim from storage/cacher.go:/persistentvolumeclaims
I1109 03:38:47.760471  106211 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.760610  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.760650  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.760704  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.760795  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.761885  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.761960  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.762095  106211 store.go:1319] Monitoring configmaps count at <storage-prefix>//configmaps
I1109 03:38:47.762204  106211 reflector.go:161] Listing and watching *core.ConfigMap from storage/cacher.go:/configmaps
I1109 03:38:47.762451  106211 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.762659  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.762675  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.762713  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.762764  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.763045  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.763136  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.763316  106211 store.go:1319] Monitoring namespaces count at <storage-prefix>//namespaces
I1109 03:38:47.763377  106211 reflector.go:161] Listing and watching *core.Namespace from storage/cacher.go:/namespaces
I1109 03:38:47.763717  106211 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.763887  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.763923  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.764030  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.764103  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.766025  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.766355  106211 store.go:1319] Monitoring endpoints count at <storage-prefix>//endpoints
I1109 03:38:47.766382  106211 reflector.go:161] Listing and watching *core.Endpoints from storage/cacher.go:/endpoints
I1109 03:38:47.766356  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.766568  106211 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.766689  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.766734  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.766791  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.766879  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.767223  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.767432  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.767705  106211 reflector.go:161] Listing and watching *core.Node from storage/cacher.go:/nodes
I1109 03:38:47.767791  106211 store.go:1319] Monitoring nodes count at <storage-prefix>//nodes
I1109 03:38:47.768576  106211 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.768775  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.768820  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.768875  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.769001  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.769504  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.769623  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.769947  106211 store.go:1319] Monitoring pods count at <storage-prefix>//pods
I1109 03:38:47.770151  106211 reflector.go:161] Listing and watching *core.Pod from storage/cacher.go:/pods
I1109 03:38:47.770863  106211 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.771090  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.771373  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.771622  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.771756  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.772416  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.772505  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.772742  106211 store.go:1319] Monitoring serviceaccounts count at <storage-prefix>//serviceaccounts
I1109 03:38:47.772894  106211 reflector.go:161] Listing and watching *core.ServiceAccount from storage/cacher.go:/serviceaccounts
I1109 03:38:47.773310  106211 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.773429  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.773472  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.773631  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.773710  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.774176  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.774354  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.774389  106211 reflector.go:161] Listing and watching *core.Service from storage/cacher.go:/services
I1109 03:38:47.774366  106211 store.go:1319] Monitoring services count at <storage-prefix>//services
I1109 03:38:47.774455  106211 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.774746  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.774789  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.774835  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.774917  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.775951  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.776034  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.776076  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.776090  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.776116  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.776181  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.777902  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.778050  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.778304  106211 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.778482  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.778531  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.778646  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.778736  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.778987  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.779078  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.779172  106211 store.go:1319] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I1109 03:38:47.779270  106211 reflector.go:161] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I1109 03:38:47.796001  106211 master.go:417] Skipping disabled API group "auditregistration.k8s.io".
I1109 03:38:47.796054  106211 master.go:425] Enabling API group "authentication.k8s.io".
I1109 03:38:47.796083  106211 master.go:425] Enabling API group "authorization.k8s.io".
I1109 03:38:47.796290  106211 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.796410  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.796428  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.796470  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.796558  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.797084  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.797547  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.797561  106211 store.go:1319] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I1109 03:38:47.797658  106211 reflector.go:161] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I1109 03:38:47.797779  106211 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.797861  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.797878  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.797917  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.797992  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.798239  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.799051  106211 store.go:1319] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I1109 03:38:47.799076  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.799177  106211 reflector.go:161] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I1109 03:38:47.799644  106211 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.799781  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.799879  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.799946  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.800155  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.800518  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.800617  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.800788  106211 store.go:1319] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I1109 03:38:47.800839  106211 master.go:425] Enabling API group "autoscaling".
I1109 03:38:47.801019  106211 reflector.go:161] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I1109 03:38:47.801222  106211 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.801374  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.801486  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.801596  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.801696  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.802083  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.802492  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.802719  106211 store.go:1319] Monitoring jobs.batch count at <storage-prefix>//jobs
I1109 03:38:47.802857  106211 reflector.go:161] Listing and watching *batch.Job from storage/cacher.go:/jobs
I1109 03:38:47.803493  106211 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.804082  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.804955  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.805038  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.805169  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.805575  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.805915  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.806294  106211 store.go:1319] Monitoring cronjobs.batch count at <storage-prefix>//cronjobs
I1109 03:38:47.806348  106211 master.go:425] Enabling API group "batch".
I1109 03:38:47.806357  106211 reflector.go:161] Listing and watching *batch.CronJob from storage/cacher.go:/cronjobs
I1109 03:38:47.806668  106211 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.806952  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.807040  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.807148  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.807278  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.807606  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.807694  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.808032  106211 store.go:1319] Monitoring certificatesigningrequests.certificates.k8s.io count at <storage-prefix>//certificatesigningrequests
I1109 03:38:47.808081  106211 master.go:425] Enabling API group "certificates.k8s.io".
I1109 03:38:47.808146  106211 reflector.go:161] Listing and watching *certificates.CertificateSigningRequest from storage/cacher.go:/certificatesigningrequests
I1109 03:38:47.808340  106211 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.808989  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.809076  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.809151  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.809574  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.809943  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.810024  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.810165  106211 store.go:1319] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I1109 03:38:47.810200  106211 reflector.go:161] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I1109 03:38:47.810407  106211 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.810515  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.812079  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.812181  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.812302  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.812790  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.812884  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.812996  106211 store.go:1319] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I1109 03:38:47.813092  106211 master.go:425] Enabling API group "coordination.k8s.io".
I1109 03:38:47.813297  106211 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.813418  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.813443  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.813527  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.813061  106211 reflector.go:161] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I1109 03:38:47.813755  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.814096  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.814559  106211 store.go:1319] Monitoring replicationcontrollers count at <storage-prefix>//replicationcontrollers
I1109 03:38:47.814653  106211 reflector.go:161] Listing and watching *core.ReplicationController from storage/cacher.go:/replicationcontrollers
I1109 03:38:47.814850  106211 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.815194  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.815237  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.814934  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.815374  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.815976  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.817356  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.817552  106211 store.go:1319] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I1109 03:38:47.817734  106211 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.817821  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.817837  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.817893  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.817934  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.818027  106211 reflector.go:161] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I1109 03:38:47.818232  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.819613  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.819784  106211 store.go:1319] Monitoring deployments.apps count at <storage-prefix>//deployments
I1109 03:38:47.819868  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.819977  106211 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.820039  106211 reflector.go:161] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I1109 03:38:47.820068  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.820078  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.820135  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.820234  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.820573  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.820645  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.820916  106211 store.go:1319] Monitoring ingresses.extensions count at <storage-prefix>//ingresses
I1109 03:38:47.820990  106211 reflector.go:161] Listing and watching *networking.Ingress from storage/cacher.go:/ingresses
I1109 03:38:47.825874  106211 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.825992  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.826009  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.826045  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.826127  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.828292  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.832427  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.832625  106211 store.go:1319] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I1109 03:38:47.832949  106211 reflector.go:161] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I1109 03:38:47.833451  106211 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.833567  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.833665  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.833742  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.833817  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.835743  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.835784  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.835900  106211 store.go:1319] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I1109 03:38:47.835972  106211 reflector.go:161] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I1109 03:38:47.836135  106211 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.836237  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.836267  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.836320  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.836405  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.836638  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.836721  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.836741  106211 store.go:1319] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I1109 03:38:47.836762  106211 master.go:425] Enabling API group "extensions".
I1109 03:38:47.836794  106211 reflector.go:161] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I1109 03:38:47.836934  106211 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.837007  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.837018  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.837047  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.837115  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.837515  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.837549  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.837597  106211 store.go:1319] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I1109 03:38:47.837656  106211 reflector.go:161] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I1109 03:38:47.837779  106211 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.837861  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.837872  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.837899  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.837966  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.845687  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.845759  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.846034  106211 store.go:1319] Monitoring ingresses.extensions count at <storage-prefix>//ingresses
I1109 03:38:47.846065  106211 master.go:425] Enabling API group "networking.k8s.io".
I1109 03:38:47.846113  106211 reflector.go:161] Listing and watching *networking.Ingress from storage/cacher.go:/ingresses
I1109 03:38:47.846113  106211 storage_factory.go:285] storing runtimeclasses.node.k8s.io in node.k8s.io/v1beta1, reading as node.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.846202  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.846219  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.846273  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.846403  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.846688  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.846770  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.846824  106211 store.go:1319] Monitoring runtimeclasses.node.k8s.io count at <storage-prefix>//runtimeclasses
I1109 03:38:47.846843  106211 master.go:425] Enabling API group "node.k8s.io".
I1109 03:38:47.847065  106211 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.847150  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.847162  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.847194  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.847239  106211 reflector.go:161] Listing and watching *node.RuntimeClass from storage/cacher.go:/runtimeclasses
I1109 03:38:47.847550  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.847973  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.848080  106211 store.go:1319] Monitoring poddisruptionbudgets.policy count at <storage-prefix>//poddisruptionbudgets
I1109 03:38:47.848289  106211 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.848359  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.848370  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.848398  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.848468  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.848519  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.848613  106211 reflector.go:161] Listing and watching *policy.PodDisruptionBudget from storage/cacher.go:/poddisruptionbudgets
I1109 03:38:47.848945  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.849074  106211 store.go:1319] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicies
I1109 03:38:47.849088  106211 master.go:425] Enabling API group "policy".
I1109 03:38:47.849114  106211 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.849156  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.849192  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.849202  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.849229  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.849367  106211 reflector.go:161] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicies
I1109 03:38:47.849602  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.849827  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.849914  106211 store.go:1319] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I1109 03:38:47.850116  106211 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.850175  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.850185  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.850236  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.850293  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.850319  106211 reflector.go:161] Listing and watching *rbac.Role from storage/cacher.go:/roles
I1109 03:38:47.850509  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.850823  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.850954  106211 store.go:1319] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I1109 03:38:47.850988  106211 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.851074  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.851087  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.851117  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.851158  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.851207  106211 reflector.go:161] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I1109 03:38:47.851417  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.851664  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.851800  106211 store.go:1319] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I1109 03:38:47.851949  106211 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.853194  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.853677  106211 reflector.go:161] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I1109 03:38:47.854806  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.855055  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.855129  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.855199  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.855464  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.855576  106211 store.go:1319] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I1109 03:38:47.855655  106211 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.855729  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.855739  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.855770  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.855816  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.855842  106211 reflector.go:161] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I1109 03:38:47.856110  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.856359  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.856558  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.857918  106211 store.go:1319] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I1109 03:38:47.857994  106211 reflector.go:161] Listing and watching *rbac.Role from storage/cacher.go:/roles
I1109 03:38:47.859708  106211 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.860939  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.861015  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.861143  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.861351  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.862885  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.863328  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.863479  106211 store.go:1319] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I1109 03:38:47.863774  106211 reflector.go:161] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I1109 03:38:47.866451  106211 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.883000  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.883028  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.883091  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.883190  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.883568  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.883737  106211 store.go:1319] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I1109 03:38:47.883987  106211 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.884077  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.884088  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.884124  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.884186  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.884227  106211 reflector.go:161] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I1109 03:38:47.884609  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.884898  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.886765  106211 store.go:1319] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I1109 03:38:47.886895  106211 master.go:425] Enabling API group "rbac.authorization.k8s.io".
I1109 03:38:47.887279  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.887367  106211 reflector.go:161] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I1109 03:38:47.889641  106211 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1beta1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.889942  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.890018  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.890138  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.890464  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.890799  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.890830  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.891132  106211 store.go:1319] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I1109 03:38:47.891213  106211 reflector.go:161] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I1109 03:38:47.891678  106211 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1beta1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.891787  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.891798  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.891897  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.892080  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.894663  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.894708  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.894893  106211 store.go:1319] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I1109 03:38:47.894913  106211 master.go:425] Enabling API group "scheduling.k8s.io".
I1109 03:38:47.894993  106211 reflector.go:161] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I1109 03:38:47.895055  106211 master.go:417] Skipping disabled API group "settings.k8s.io".
I1109 03:38:47.895271  106211 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.895348  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.895359  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.895426  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.895577  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.896633  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.896904  106211 store.go:1319] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I1109 03:38:47.896947  106211 reflector.go:161] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I1109 03:38:47.896906  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.896983  106211 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.897100  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.897142  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.897196  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.897349  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.897834  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.897883  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.898195  106211 store.go:1319] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I1109 03:38:47.898368  106211 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.898297  106211 reflector.go:161] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I1109 03:38:47.904055  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.904452  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.904514  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.904593  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.905094  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.907138  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.907273  106211 store.go:1319] Monitoring csinodes.storage.k8s.io count at <storage-prefix>//csinodes
I1109 03:38:47.907424  106211 storage_factory.go:285] storing csidrivers.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.907684  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.907809  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.907874  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.907367  106211 reflector.go:161] Listing and watching *storage.CSINode from storage/cacher.go:/csinodes
I1109 03:38:47.908223  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.908583  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.908924  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.908994  106211 store.go:1319] Monitoring csidrivers.storage.k8s.io count at <storage-prefix>//csidrivers
I1109 03:38:47.909078  106211 reflector.go:161] Listing and watching *storage.CSIDriver from storage/cacher.go:/csidrivers
I1109 03:38:47.909790  106211 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.910128  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.910216  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.910333  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.910434  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.911053  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.911205  106211 store.go:1319] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I1109 03:38:47.911280  106211 reflector.go:161] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I1109 03:38:47.911288  106211 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.911387  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.911398  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.911460  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.911514  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.911604  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.911977  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.912187  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.912228  106211 store.go:1319] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I1109 03:38:47.912745  106211 master.go:425] Enabling API group "storage.k8s.io".
I1109 03:38:47.912938  106211 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.913013  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.913023  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.913055  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.912270  106211 reflector.go:161] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I1109 03:38:47.913355  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.913602  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.913770  106211 store.go:1319] Monitoring deployments.apps count at <storage-prefix>//deployments
I1109 03:38:47.913792  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.913832  106211 reflector.go:161] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I1109 03:38:47.914189  106211 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.914369  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.914382  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.914417  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.914494  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.914756  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.914942  106211 store.go:1319] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I1109 03:38:47.915054  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.915116  106211 reflector.go:161] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I1109 03:38:47.915198  106211 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.915292  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.915304  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.915346  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.915546  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.915776  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.915902  106211 store.go:1319] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I1109 03:38:47.916014  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.916156  106211 reflector.go:161] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I1109 03:38:47.916646  106211 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.916783  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.916810  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.916852  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.917042  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.917332  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.917654  106211 store.go:1319] Monitoring deployments.apps count at <storage-prefix>//deployments
I1109 03:38:47.917688  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.917847  106211 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.917912  106211 reflector.go:161] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I1109 03:38:47.917932  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.917945  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.917972  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.918109  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.918433  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.918518  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.918581  106211 store.go:1319] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I1109 03:38:47.918649  106211 reflector.go:161] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I1109 03:38:47.918805  106211 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.918888  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.918903  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.919031  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.919167  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.919418  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.919567  106211 store.go:1319] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I1109 03:38:47.919703  106211 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.919804  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.919817  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.919878  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.919930  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.920032  106211 reflector.go:161] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I1109 03:38:47.920203  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.921087  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.921123  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.921292  106211 store.go:1319] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I1109 03:38:47.921582  106211 reflector.go:161] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I1109 03:38:47.921641  106211 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.921708  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.921727  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.921755  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.921842  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.922118  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.922195  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.922352  106211 store.go:1319] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I1109 03:38:47.922530  106211 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.922605  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.922618  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.922652  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.922698  106211 reflector.go:161] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I1109 03:38:47.922902  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.923178  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.923337  106211 store.go:1319] Monitoring deployments.apps count at <storage-prefix>//deployments
I1109 03:38:47.923498  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.923487  106211 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.923607  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.923618  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.923644  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.923727  106211 reflector.go:161] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I1109 03:38:47.923749  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.924002  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.924059  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.924126  106211 store.go:1319] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I1109 03:38:47.924361  106211 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.924449  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.924469  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.924535  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.924579  106211 reflector.go:161] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I1109 03:38:47.924852  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.934969  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.935432  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.935486  106211 store.go:1319] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I1109 03:38:47.935512  106211 reflector.go:161] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I1109 03:38:47.935834  106211 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.935964  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.935977  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.936011  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.936109  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.938580  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.938789  106211 store.go:1319] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I1109 03:38:47.938868  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.938969  106211 reflector.go:161] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I1109 03:38:47.938988  106211 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.939090  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.939112  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.939147  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.939309  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.939609  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.939764  106211 store.go:1319] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I1109 03:38:47.939784  106211 master.go:425] Enabling API group "apps".
I1109 03:38:47.939821  106211 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.939850  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.939895  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.939898  106211 reflector.go:161] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I1109 03:38:47.939905  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.939932  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.940094  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.940378  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.940421  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.940453  106211 store.go:1319] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I1109 03:38:47.940478  106211 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.940552  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.940563  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.940591  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.940659  106211 reflector.go:161] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I1109 03:38:47.940857  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.941979  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.942206  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.942281  106211 store.go:1319] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I1109 03:38:47.942283  106211 reflector.go:161] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I1109 03:38:47.942306  106211 master.go:425] Enabling API group "admissionregistration.k8s.io".
I1109 03:38:47.942350  106211 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", CAFile:""}, Quorum:false, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 03:38:47.942948  106211 client.go:352] parsed scheme: ""
I1109 03:38:47.942975  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:47.943006  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:47.943707  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.944214  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:47.944280  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:47.944369  106211 store.go:1319] Monitoring events count at <storage-prefix>//events
I1109 03:38:47.944400  106211 master.go:425] Enabling API group "events.k8s.io".
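Every storage_factory.go:285 line above prints the same storagebackend.Config: one etcd endpoint at http://127.0.0.1:2379, this run's random key prefix, quorum reads off, paging on, a 300s compaction interval and a 60s count-metric poll (the two nanosecond values). A minimal Go sketch that builds that same config, assuming the k8s.io/apiserver storagebackend package as vendored at this commit; every field name and value below is taken from the log lines themselves:

    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/apiserver/pkg/storage/storagebackend"
    )

    func main() {
    	// Mirrors the Config printed by storage_factory.go:285 above.
    	cfg := storagebackend.Config{
    		Prefix: "5dd9cb55-6304-4b3a-975b-2b36a6ed5dd2",
    		Transport: storagebackend.TransportConfig{
    			ServerList: []string{"http://127.0.0.1:2379"},
    		},
    		Quorum:                false,           // as logged
    		Paging:                true,            // as logged
    		CompactionInterval:    5 * time.Minute, // 300000000000ns in the log
    		CountMetricPollPeriod: time.Minute,     // 60000000000ns in the log
    	}
    	fmt.Printf("%#v\n", cfg)
    }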
W1109 03:38:47.951881  106211 genericapiserver.go:344] Skipping API batch/v2alpha1 because it has no resources.
W1109 03:38:47.964799  106211 genericapiserver.go:344] Skipping API node.k8s.io/v1alpha1 because it has no resources.
W1109 03:38:47.971900  106211 genericapiserver.go:344] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
W1109 03:38:48.039177  106211 genericapiserver.go:344] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
W1109 03:38:48.055673  106211 genericapiserver.go:344] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
I1109 03:38:48.109652  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.109688  106211 healthz.go:161] healthz check poststarthook/bootstrap-controller failed: not finished
I1109 03:38:48.109698  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.109708  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.109717  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.109853  106211 wrap.go:47] GET /healthz: (296.853µs) 500
goroutine 29468 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc006fdd5e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc006fdd5e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc006f31880, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00bf29560, 0xc009288000, 0x18a, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00bf29560, 0xc006f55200)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00bf29560, 0xc006f55200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00bf29560, 0xc006f55200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00bf29560, 0xc006f55200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00bf29560, 0xc006f55200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00bf29560, 0xc006f55200)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00bf29560, 0xc006f55200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00bf29560, 0xc006f55200)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00bf29560, 0xc006f55200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00bf29560, 0xc006f55200)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00bf29560, 0xc006f55200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00bf29560, 0xc006f55100)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00bf29560, 0xc006f55100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00d557c20, 0xc00edd15a0, 0x75ce240, 0xc00bf29560, 0xc006f55100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[-]poststarthook/bootstrap-controller failed: reason withheld\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
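The 500 above is the healthz aggregator at work: each named check ([+]ping, [-]etcd, the poststarthook entries) runs on every request, any single failure turns the whole response into a 500, and the failure reasons are withheld while the per-check breakdown is still returned in the body. A quick probe of the same endpoint, sketched in Go; the test apiserver's port never appears in this log, so the address below is a placeholder:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"net/http"
    )

    // Placeholder: the integration apiserver binds a port this log never prints.
    const healthzURL = "http://127.0.0.1:8080/healthz"

    func main() {
    	resp, err := http.Get(healthzURL)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	body, _ := ioutil.ReadAll(resp.Body)
    	fmt.Println(resp.Status) // "500 Internal Server Error" while any check still fails
    	fmt.Print(string(body))  // the same [+]/[-] breakdown as in the log above
    }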
I1109 03:38:48.110966  106211 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.313597ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40764]
I1109 03:38:48.113733  106211 wrap.go:47] GET /api/v1/services: (1.083681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40764]
I1109 03:38:48.117530  106211 wrap.go:47] GET /api/v1/services: (990.077µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40764]
I1109 03:38:48.119527  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.119552  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.119563  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.119572  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.119712  106211 wrap.go:47] GET /healthz: (262.732µs) 500
goroutine 29491 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e8d80e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e8d80e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ebbe5c0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0027fe5a8, 0xc001ebe300, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ac00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0027fe5a8, 0xc00ce1ac00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e8a07e0, 0xc00edd15a0, 0x75ce240, 0xc0027fe5a8, 0xc00ce1ac00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:48.120796  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.297833ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40764]
I1109 03:38:48.121939  106211 wrap.go:47] GET /api/v1/services: (1.100088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:48.121982  106211 wrap.go:47] GET /api/v1/services: (867.226µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40764]
I1109 03:38:48.124200  106211 wrap.go:47] POST /api/v1/namespaces: (3.029309ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40766]
I1109 03:38:48.125678  106211 wrap.go:47] GET /api/v1/namespaces/kube-public: (993.971µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:48.127921  106211 wrap.go:47] POST /api/v1/namespaces: (1.721772ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:48.129215  106211 wrap.go:47] GET /api/v1/namespaces/kube-node-lease: (1.013271ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:48.131085  106211 wrap.go:47] POST /api/v1/namespaces: (1.391795ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
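The GET-404-then-POST-201 pairs just above are the bootstrap controller creating the kube-system, kube-public and kube-node-lease namespaces, while the harness keeps re-polling GET /healthz every few tens of milliseconds until etcd connects and the remaining post-start hooks finish. A sketch of such a poll loop using the wait helpers vendored in this tree, assuming the same placeholder address as before:

    package main

    import (
    	"net/http"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    // Placeholder: the real port is not shown in this log.
    const healthzURL = "http://127.0.0.1:8080/healthz"

    func main() {
    	// Retry until /healthz returns 200, as the requests above do, capped at 30s.
    	err := wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
    		resp, err := http.Get(healthzURL)
    		if err != nil {
    			return false, nil // transport error: keep polling
    		}
    		resp.Body.Close()
    		return resp.StatusCode == http.StatusOK, nil
    	})
    	if err != nil {
    		panic(err) // timed out: some check never went [+]
    	}
    }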
I1109 03:38:48.210649  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.210807  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.210839  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.210877  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.211137  106211 wrap.go:47] GET /healthz: (640.409µs) 500
goroutine 29530 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e7ef180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e7ef180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00eb9c680, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00a112158, 0xc00e488c00, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00a112158, 0xc00e38dc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00a112158, 0xc00e38db00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00a112158, 0xc00e38db00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00ebedb60, 0xc00edd15a0, 0x75ce240, 0xc00a112158, 0xc00e38db00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
I1109 03:38:48.220464  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.220563  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.220592  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.220632  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.220831  106211 wrap.go:47] GET /healthz: (534.407µs) 500
goroutine 29489 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e765650, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e765650, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec36200, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e342278, 0xc00999a900, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e342278, 0xc00cb3ad00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e342278, 0xc00cb3ac00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e342278, 0xc00cb3ac00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e9a7e00, 0xc00edd15a0, 0x75ce240, 0xc00e342278, 0xc00cb3ac00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:48.310838  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.310879  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.310891  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.310899  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.311088  106211 wrap.go:47] GET /healthz: (393.696µs) 500
goroutine 29407 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e90c4d0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e90c4d0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ebaec40, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e19a0b0, 0xc005e8e600, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e19a0b0, 0xc00cddd000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e19a0b0, 0xc00cddcf00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e19a0b0, 0xc00cddcf00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00ec62de0, 0xc00edd15a0, 0x75ce240, 0xc00e19a0b0, 0xc00cddcf00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
I1109 03:38:48.320576  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.320620  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.320632  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.320640  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.325181  106211 wrap.go:47] GET /healthz: (4.72266ms) 500
goroutine 29441 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e2fa000, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e2fa000, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec502e0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc005e40020, 0xc00c17a300, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc005e40020, 0xc00cc86f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc005e40020, 0xc00cc86c00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc005e40020, 0xc00cc86c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e4da720, 0xc00edd15a0, 0x75ce240, 0xc005e40020, 0xc00cc86c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
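
Every trace walks the same filter chain, innermost frame first: the healthz handler, then the mux and director, then WithAuthorization, WithMaxInFlightLimit, WithImpersonation, WithAuthentication, and finally the timeout filter. A sketch of how such a chain is assembled (withFilter is a generic stand-in, not the apiserver's code) and why the frames appear in that order:

package main

import (
	"log"
	"net/http"
)

// withFilter stands in for wrappers like WithAuthentication: each returns
// a handler that does its own work and then delegates inward.
func withFilter(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Printf("enter %s", name)
		next.ServeHTTP(w, r) // this delegate call becomes the next-higher stack frame
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Built innermost-first, so a goroutine dump taken inside the mux lists
	// authorization above max-in-flight above impersonation above
	// authentication, exactly the frame order seen in the traces above.
	var h http.Handler = mux
	h = withFilter("authorization", h)
	h = withFilter("max-in-flight", h)
	h = withFilter("impersonation", h)
	h = withFilter("authentication", h)

	log.Fatal(http.ListenAndServe(":8080", h))
}
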
I1109 03:38:48.410686  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.410732  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.410745  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.410754  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.410920  106211 wrap.go:47] GET /healthz: (363.359µs) 500
goroutine 29539 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e2fa150, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e2fa150, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec50500, 0x1f4)
net/http.Error(0x7fb280505648, 0xc005e40028, 0xc00c17a900, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc005e40028, 0xc00cc87500)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc005e40028, 0xc00cc87500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc005e40028, 0xc00cc87500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc005e40028, 0xc00cc87500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc005e40028, 0xc00cc87500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc005e40028, 0xc00cc87500)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc005e40028, 0xc00cc87500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc005e40028, 0xc00cc87500)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc005e40028, 0xc00cc87500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc005e40028, 0xc00cc87500)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc005e40028, 0xc00cc87500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc005e40028, 0xc00cc87300)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc005e40028, 0xc00cc87300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e4daa20, 0xc00edd15a0, 0x75ce240, 0xc005e40028, 0xc00cc87300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
I1109 03:38:48.420473  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.420520  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.420540  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.420549  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.420697  106211 wrap.go:47] GET /healthz: (364.909µs) 500
goroutine 29375 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00ec06c40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00ec06c40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e91ea60, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0009ba980, 0xc0020fa780, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0009ba980, 0xc00d4abd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0009ba980, 0xc00d4abc00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0009ba980, 0xc00d4abc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e92f500, 0xc00edd15a0, 0x75ce240, 0xc0009ba980, 0xc00d4abc00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
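
Each dump ends with "created by ... (*timeoutHandler).ServeHTTP" because the request is served on a goroutine spawned by the timeout filter; the dump reports that spawn site as the goroutine's origin. A sketch of the spawn-and-wait pattern, with the ResponseWriter race handling deliberately elided:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// timeoutHandler illustrates the pattern: serve the request on a fresh
// goroutine so the parent can enforce a deadline. Any stack dump from the
// inner handler then shows "created by (*timeoutHandler).ServeHTTP".
type timeoutHandler struct {
	inner   http.Handler
	timeout time.Duration
}

func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	done := make(chan struct{})
	go func() { // this spawn is the "created by" line in the goroutine dumps
		defer close(done)
		t.inner.ServeHTTP(w, r)
	}()
	select {
	case <-done:
	case <-time.After(t.timeout):
		// Production code must also fence off the ResponseWriter so the
		// late goroutine cannot write after the timeout; elided here.
		fmt.Println("request timed out")
	}
}

func main() {
	h := &timeoutHandler{
		inner: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok"))
		}),
		timeout: time.Minute,
	}
	http.ListenAndServe(":8080", h)
}
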
I1109 03:38:48.510746  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.510787  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.510799  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.510807  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.510945  106211 wrap.go:47] GET /healthz: (332.188µs) 500
goroutine 29541 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e2fa230, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e2fa230, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec50600, 0x1f4)
net/http.Error(0x7fb280505648, 0xc005e40050, 0xc00c17ad80, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc005e40050, 0xc00cc87b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc005e40050, 0xc00cc87a00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc005e40050, 0xc00cc87a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e4db260, 0xc00edd15a0, 0x75ce240, 0xc005e40050, 0xc00cc87a00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
I1109 03:38:48.520499  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.520535  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.520548  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.520557  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.520724  106211 wrap.go:47] GET /healthz: (365.03µs) 500
goroutine 29555 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e765730, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e765730, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec36420, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e342280, 0xc00999af00, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e342280, 0xc00cb3b500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e342280, 0xc00cb3b300)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e342280, 0xc00cb3b300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e58a120, 0xc00edd15a0, 0x75ce240, 0xc00e342280, 0xc00cb3b300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
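
The "GET /healthz: (363.359µs) 500" lines come from a logging ResponseWriter that records the status code in WriteHeader (the httplog recordStatus/WriteHeader frames at the top of each trace) and prints method, path, latency, and code once the handler returns. A minimal sketch of that wrapper:

package main

import (
	"log"
	"net/http"
	"time"
)

// respLogger mimics the wrap.go pattern: intercept WriteHeader to capture
// the status, then log "<method> <path>: (<latency>) <code>" afterwards.
type respLogger struct {
	http.ResponseWriter
	status int
}

func (l *respLogger) WriteHeader(code int) {
	l.status = code // recordStatus, as in the httplog frames above
	l.ResponseWriter.WriteHeader(code)
}

func withLogging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		lw := &respLogger{ResponseWriter: w, status: http.StatusOK}
		next.ServeHTTP(lw, r)
		log.Printf("%s %s: (%v) %d", r.Method, r.URL.Path, time.Since(start), lw.status)
	})
}

func main() {
	http.Handle("/healthz", withLogging(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "healthz check failed", http.StatusInternalServerError)
	})))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
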
I1109 03:38:48.610716  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.610757  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.610769  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.610777  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.610932  106211 wrap.go:47] GET /healthz: (392.617µs) 500
goroutine 29532 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e7ef1f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e7ef1f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00eb9c720, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00a112160, 0xc00e489080, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00a112160, 0xc00aebe100)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00a112160, 0xc00aebe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00a112160, 0xc00aebe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00a112160, 0xc00aebe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00a112160, 0xc00aebe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00a112160, 0xc00aebe100)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00a112160, 0xc00aebe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00a112160, 0xc00aebe100)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00a112160, 0xc00aebe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00a112160, 0xc00aebe100)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00a112160, 0xc00aebe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00a112160, 0xc00e38df00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00a112160, 0xc00e38df00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e956000, 0xc00edd15a0, 0x75ce240, 0xc00a112160, 0xc00e38df00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
I1109 03:38:48.620532  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.620571  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.620590  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.620598  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.620771  106211 wrap.go:47] GET /healthz: (347.557µs) 500
goroutine 29543 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e2fa310, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e2fa310, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec506a0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc005e40058, 0xc00c17b500, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc005e40058, 0xc00cc87f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc005e40058, 0xc00cc87e00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc005e40058, 0xc00cc87e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e4db320, 0xc00edd15a0, 0x75ce240, 0xc005e40058, 0xc00cc87e00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
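
On the client side, the GETs arrive on a steady ~100ms cadence (…:48.410, .420, .510, .520, …), consistent with the test harness polling /healthz until it receives a 200. A sketch of such a poller; the URL and timings are assumptions, not the test's actual values:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHealthy polls the given URL on a short interval until the server
// returns 200 or the deadline expires.
func waitForHealthy(url string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil // server reported healthy
			}
		}
		time.Sleep(interval) // matches the ~100ms cadence of the GETs above
	}
	return fmt.Errorf("server at %s not healthy after %v", url, timeout)
}

func main() {
	err := waitForHealthy("http://127.0.0.1:8080/healthz", 100*time.Millisecond, 30*time.Second)
	fmt.Println(err)
}
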
I1109 03:38:48.710722  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.710757  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.710769  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.710778  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.710923  106211 wrap.go:47] GET /healthz: (352.159µs) 500
goroutine 29545 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e2fa3f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e2fa3f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec50740, 0x1f4)
net/http.Error(0x7fb280505648, 0xc005e40060, 0xc00c17b980, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc005e40060, 0xc00a1e6500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc005e40060, 0xc00a1e6400)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc005e40060, 0xc00a1e6400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e4db3e0, 0xc00edd15a0, 0x75ce240, 0xc005e40060, 0xc00a1e6400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
I1109 03:38:48.720407  106211 healthz.go:161] healthz check etcd failed: etcd client connection not yet established
I1109 03:38:48.720441  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.720451  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.720456  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.720568  106211 wrap.go:47] GET /healthz: (290.782µs) 500
goroutine 29547 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e2fa540, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e2fa540, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec507e0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc005e40068, 0xc00c17be00, 0x175, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc005e40068, 0xc00a1e6a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc005e40068, 0xc00a1e6900)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc005e40068, 0xc00a1e6900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e4db4a0, 0xc00edd15a0, 0x75ce240, 0xc005e40068, 0xc00a1e6900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[-]etcd failed: reason withheld\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:48.750301  106211 client.go:352] parsed scheme: ""
I1109 03:38:48.750345  106211 client.go:352] scheme "" not registered, fallback to default scheme
I1109 03:38:48.750395  106211 asm_amd64.s:1337] ccResolverWrapper: sending new addresses to cc: [{127.0.0.1:2379 0  <nil>}]
I1109 03:38:48.750481  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:48.751019  106211 balancer_conn_wrappers.go:131] clientv3/balancer: pin "127.0.0.1:2379"
I1109 03:38:48.751083  106211 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 <nil>}]
I1109 03:38:48.811940  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.811970  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.811980  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.812196  106211 wrap.go:47] GET /healthz: (1.566744ms) 500
goroutine 29557 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e765810, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e765810, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec36820, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e3422a8, 0xc00bf8e420, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e3422a8, 0xc00cb3be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e3422a8, 0xc00cb3bd00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e3422a8, 0xc00cb3bd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e58a360, 0xc00edd15a0, 0x75ce240, 0xc00e3422a8, 0xc00cb3bd00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
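
At 03:38:48.750 another etcd client is created and the balancer pins 127.0.0.1:2379; from this block onward the report flips from "[-]etcd failed" to "[+]etcd ok". The real check goes through the gRPC-based clientv3 balancer shown in the log; as a loose stand-in, a dial-based probe conveys the idea:

package main

import (
	"fmt"
	"net"
	"time"
)

// etcdCheck is an illustrative stand-in for the etcd health check: once an
// endpoint is reachable (cf. the `pin "127.0.0.1:2379"` line above), the
// check stops reporting "connection not yet established".
func etcdCheck(endpoint string) error {
	conn, err := net.DialTimeout("tcp", endpoint, time.Second)
	if err != nil {
		return fmt.Errorf("etcd client connection not yet established")
	}
	conn.Close()
	return nil
}

func main() {
	fmt.Println(etcdCheck("127.0.0.1:2379"))
}
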
I1109 03:38:48.821345  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.821378  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.821388  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.821542  106211 wrap.go:47] GET /healthz: (1.222309ms) 500
goroutine 29559 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e765a40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e765a40, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec36a20, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e3422b8, 0xc00bf8e6e0, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e3422b8, 0xc0087a8500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e3422b8, 0xc0087a8200)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e3422b8, 0xc0087a8200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e58a660, 0xc00edd15a0, 0x75ce240, 0xc00e3422b8, 0xc0087a8200)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:48.911555  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.911583  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.911594  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.911758  106211 wrap.go:47] GET /healthz: (1.264823ms) 500
goroutine 29534 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e7ef500, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e7ef500, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00eb9cc40, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00a1121d0, 0xc00bf8e9a0, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00a1121d0, 0xc00aebec00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00a1121d0, 0xc00aebeb00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00a1121d0, 0xc00aebeb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e9562a0, 0xc00edd15a0, 0x75ce240, 0xc00a1121d0, 0xc00aebeb00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
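
With etcd healthy, only the poststarthook checks still fail with "not finished": each hook runs in the background, and its healthz check passes only once the hook marks itself done. A sketch of that bookkeeping, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

// hookTracker sketches the "poststarthook/... failed: not finished"
// behaviour: the check for a named hook fails until its goroutine completes.
type hookTracker struct {
	mu   sync.Mutex
	done map[string]bool
}

func (t *hookTracker) run(name string, hook func()) {
	go func() {
		hook()
		t.mu.Lock()
		t.done[name] = true // from here on the healthz check passes
		t.mu.Unlock()
	}()
}

func (t *hookTracker) check(name string) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	if !t.done[name] {
		return fmt.Errorf("not finished")
	}
	return nil
}

func main() {
	t := &hookTracker{done: map[string]bool{}}
	t.run("rbac/bootstrap-roles", func() { /* seed default roles */ })
	// Queried right after launch, the check may still report "not finished",
	// just as the log does while the hooks race the health polls.
	fmt.Println(t.check("rbac/bootstrap-roles"))
}
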
I1109 03:38:48.921309  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:48.921338  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:48.921348  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:48.921524  106211 wrap.go:47] GET /healthz: (1.226417ms) 500
goroutine 29553 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e2fa7e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e2fa7e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec50ba0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc005e400a8, 0xc00e3402c0, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc005e400a8, 0xc00a1e7200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc005e400a8, 0xc00a1e7100)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc005e400a8, 0xc00a1e7100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e4dbc80, 0xc00edd15a0, 0x75ce240, 0xc005e400a8, 0xc00a1e7100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:49.011772  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.011809  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:49.011819  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:49.011983  106211 wrap.go:47] GET /healthz: (1.44134ms) 500
goroutine 29536 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e7ef650, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e7ef650, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00eb9d280, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00a112230, 0xc00bf8ec60, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00a112230, 0xc00aebf600)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00a112230, 0xc00aebf600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00a112230, 0xc00aebf600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00a112230, 0xc00aebf600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00a112230, 0xc00aebf600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00a112230, 0xc00aebf600)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00a112230, 0xc00aebf600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00a112230, 0xc00aebf600)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00a112230, 0xc00aebf600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00a112230, 0xc00aebf600)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00a112230, 0xc00aebf600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00a112230, 0xc00aebf500)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00a112230, 0xc00aebf500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e956780, 0xc00edd15a0, 0x75ce240, 0xc00a112230, 0xc00aebf500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40762]
I1109 03:38:49.021470  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.021501  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:49.021511  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:49.021662  106211 wrap.go:47] GET /healthz: (1.321178ms) 500
goroutine 29586 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e7ef7a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e7ef7a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00eb9d5a0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00a112270, 0xc00bf8ef20, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00a112270, 0xc00aebfe00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00a112270, 0xc00aebfd00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00a112270, 0xc00aebfd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e956ae0, 0xc00edd15a0, 0x75ce240, 0xc00a112270, 0xc00aebfd00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:49.111932  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.111971  106211 healthz.go:161] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 03:38:49.111981  106211 healthz.go:161] healthz check poststarthook/ca-registration failed: not finished
I1109 03:38:49.112064  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.841962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40764]
I1109 03:38:49.112140  106211 wrap.go:47] GET /healthz: (1.226679ms) 500
goroutine 29604 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e90c7e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e90c7e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ebaf1a0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e19a100, 0xc0028b8580, 0x160, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e19a100, 0xc008ffe300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e19a100, 0xc008ffe200)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e19a100, 0xc008ffe200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00ec63920, 0xc00edd15a0, 0x75ce240, 0xc00e19a100, 0xc008ffe200)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld\n[-]poststarthook/ca-registration failed: reason withheld\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:49.112555  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.933298ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:49.113497  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (943.464µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40764]
I1109 03:38:49.113670  106211 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-node-critical: (3.452881ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.113815  106211 wrap.go:47] GET /api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication: (821.89µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40762]
I1109 03:38:49.115847  106211 wrap.go:47] POST /api/v1/namespaces/kube-system/configmaps: (1.421462ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.116019  106211 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.894141ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.116336  106211 storage_scheduling.go:113] created PriorityClass system-node-critical with value 2000001000
I1109 03:38:49.116607  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (2.434043ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40764]
I1109 03:38:49.118133  106211 wrap.go:47] GET /apis/scheduling.k8s.io/v1beta1/priorityclasses/system-cluster-critical: (1.355662ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.120707  106211 wrap.go:47] POST /apis/scheduling.k8s.io/v1beta1/priorityclasses: (1.822494ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.120798  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (2.347527ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40764]
I1109 03:38:49.120987  106211 storage_scheduling.go:113] created PriorityClass system-cluster-critical with value 2000000000
I1109 03:38:49.121003  106211 storage_scheduling.go:122] all system priority classes are created successfully or already exist.
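
Note: the GET-404 / POST-201 pairs above are the bootstrap ensure-exists pattern: look the object up, and create it only on NotFound. A sketch of that pattern for the two system PriorityClasses, assuming the release-1.14-era client-go API (methods without a context argument; the helper name is invented for illustration):

package bootstrap

import (
	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureSystemPriorityClasses creates the two built-in classes if they are
// missing, tolerating the case where they already exist.
func ensureSystemPriorityClasses(cs kubernetes.Interface) error {
	for _, pc := range []*schedulingv1beta1.PriorityClass{
		{ObjectMeta: metav1.ObjectMeta{Name: "system-node-critical"}, Value: 2000001000},
		{ObjectMeta: metav1.ObjectMeta{Name: "system-cluster-critical"}, Value: 2000000000},
	} {
		// GET first; a 404 here is the expected "not bootstrapped yet" case.
		_, err := cs.SchedulingV1beta1().PriorityClasses().Get(pc.Name, metav1.GetOptions{})
		if err == nil {
			continue // already exists
		}
		if !apierrors.IsNotFound(err) {
			return err
		}
		// POST; IsAlreadyExists guards against a concurrent creator.
		if _, err := cs.SchedulingV1beta1().PriorityClasses().Create(pc); err != nil && !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	return nil
}
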
I1109 03:38:49.121458  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.121615  106211 wrap.go:47] GET /healthz: (1.323062ms) 500
goroutine 29563 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e765f80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e765f80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00ec376e0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e342340, 0xc002acedc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e342340, 0xc0087a9900)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e342340, 0xc0087a9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e342340, 0xc0087a9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e342340, 0xc0087a9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e342340, 0xc0087a9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e342340, 0xc0087a9900)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e342340, 0xc0087a9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e342340, 0xc0087a9900)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e342340, 0xc0087a9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e342340, 0xc0087a9900)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e342340, 0xc0087a9900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e342340, 0xc0087a9800)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e342340, 0xc0087a9800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00e58aba0, 0xc00edd15a0, 0x75ce240, 0xc00e342340, 0xc0087a9800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
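
Note: interleaved with the bootstrap writes, the test harness keeps polling GET /healthz and sees 500s like the one above until every post-start hook reports ok. A minimal sketch of such a readiness poll using wait.PollImmediate from apimachinery; the URL and timings are placeholders, not values taken from this test:

package main

import (
	"fmt"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForHealthz polls the endpoint until it returns 200 or the timeout
// elapses; transient 500s during bootstrap count as "not ready yet".
func waitForHealthz(url string) error {
	return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		resp, err := http.Get(url)
		if err != nil {
			return false, nil // server may not be listening yet
		}
		defer resp.Body.Close()
		return resp.StatusCode == http.StatusOK, nil
	})
}

func main() {
	if err := waitForHealthz("http://127.0.0.1:8080/healthz"); err != nil {
		fmt.Println("apiserver never became healthy:", err)
	}
}
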
I1109 03:38:49.123045  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (1.006127ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.124214  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (750.898µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.125222  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (680.181µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.126206  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (652.353µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.127499  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (795.437µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.128543  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin: (736.841µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.130441  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.601962ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.130651  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/cluster-admin
I1109 03:38:49.131750  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:discovery: (906.394µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.133857  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.801688ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.134054  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:discovery
I1109 03:38:49.135034  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:basic-user: (699.6µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.137090  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.55825ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.137238  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:basic-user
I1109 03:38:49.138491  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:public-info-viewer: (671.352µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.140823  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.936583ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.141045  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:public-info-viewer
I1109 03:38:49.142149  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (768.965µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.143712  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.272918ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.143934  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/admin
I1109 03:38:49.145181  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (1.009536ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.146875  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.314719ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.147422  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/edit
I1109 03:38:49.148591  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (982.683µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.151327  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.402348ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.151651  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/view
I1109 03:38:49.152518  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (699.501µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.154105  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.259092ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.154333  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-admin
I1109 03:38:49.155153  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (681.99µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.157373  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.721563ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.158234  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-edit
I1109 03:38:49.160687  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (2.270607ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.162972  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.785232ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.163233  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-view
I1109 03:38:49.164383  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:heapster: (814.391µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.166088  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.429718ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.166259  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:heapster
I1109 03:38:49.167492  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node: (925.185µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.170195  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.847987ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.170592  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:node
I1109 03:38:49.171647  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-problem-detector: (686.672µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.173776  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.638315ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.174026  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:node-problem-detector
I1109 03:38:49.175043  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-proxier: (831.01µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.176802  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.269727ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.177497  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:node-proxier
I1109 03:38:49.178761  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kubelet-api-admin: (1.071834ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.180869  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.739256ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.181064  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kubelet-api-admin
I1109 03:38:49.182353  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-bootstrapper: (841.713µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.184180  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.392901ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.184457  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:node-bootstrapper
I1109 03:38:49.186099  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:auth-delegator: (1.109535ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.189394  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.784407ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.189684  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:auth-delegator
I1109 03:38:49.190697  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-aggregator: (846.504µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.192880  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.884111ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.193373  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kube-aggregator
I1109 03:38:49.194853  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-controller-manager: (1.239095ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.197488  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.296143ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.197750  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kube-controller-manager
I1109 03:38:49.198904  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-scheduler: (807.906µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.201314  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.808516ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.201587  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kube-scheduler
I1109 03:38:49.203231  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-dns: (1.351019ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.205973  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.97524ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.206208  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:kube-dns
I1109 03:38:49.207619  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:persistent-volume-provisioner: (978.056µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.209621  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.664747ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.209850  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:persistent-volume-provisioner
I1109 03:38:49.211103  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-attacher: (1.091201ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.211181  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.211440  106211 wrap.go:47] GET /healthz: (972.94µs) 500
goroutine 29722 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e1d4770, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e1d4770, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e38bd80, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e19a630, 0xc003767b80, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e19a630, 0xc0012da700)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e19a630, 0xc0012da700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e19a630, 0xc0012da700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e19a630, 0xc0012da700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e19a630, 0xc0012da700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e19a630, 0xc0012da700)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e19a630, 0xc0012da700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e19a630, 0xc0012da700)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e19a630, 0xc0012da700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e19a630, 0xc0012da700)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e19a630, 0xc0012da700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e19a630, 0xc0012da400)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e19a630, 0xc0012da400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00b8c9740, 0xc00edd15a0, 0x75ce240, 0xc00e19a630, 0xc0012da400)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:49.216573  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.108214ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.216837  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:csi-external-attacher
I1109 03:38:49.218020  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aws-cloud-provider: (898.499µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.221137  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.221460  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.97824ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.221526  106211 wrap.go:47] GET /healthz: (1.350921ms) 500
goroutine 29752 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e1b2a80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e1b2a80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00e486a40, 0x1f4)
net/http.Error(0x7fb280505648, 0xc009ef8628, 0xc001dc48c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc009ef8628, 0xc00262f800)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc009ef8628, 0xc00262f800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc009ef8628, 0xc00262f800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc009ef8628, 0xc00262f800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc009ef8628, 0xc00262f800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc009ef8628, 0xc00262f800)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc009ef8628, 0xc00262f800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc009ef8628, 0xc00262f800)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc009ef8628, 0xc00262f800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc009ef8628, 0xc00262f800)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc009ef8628, 0xc00262f800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc009ef8628, 0xc00262f500)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc009ef8628, 0xc00262f500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00a0bf020, 0xc00edd15a0, 0x75ce240, 0xc009ef8628, 0xc00262f500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.221691  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:aws-cloud-provider
I1109 03:38:49.222900  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:nodeclient: (980.422µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.224925  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.437757ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.225636  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:nodeclient
I1109 03:38:49.226678  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient: (759.825µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.228874  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.77382ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.229129  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
I1109 03:38:49.230210  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:volume-scheduler: (899.357µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.234723  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.649828ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.235023  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:volume-scheduler
I1109 03:38:49.235882  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:csi-external-provisioner: (704.055µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.240223  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.920973ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.240513  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:csi-external-provisioner
I1109 03:38:49.241767  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:attachdetach-controller: (1.101827ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.246493  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.125213ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.246960  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I1109 03:38:49.248135  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:clusterrole-aggregation-controller: (933.08µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.253351  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.732937ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.253537  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I1109 03:38:49.254488  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:cronjob-controller: (781.834µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.256873  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.080143ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.257055  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:cronjob-controller
I1109 03:38:49.258842  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:daemon-set-controller: (1.665286ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.261030  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.896322ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.261218  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I1109 03:38:49.262359  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:deployment-controller: (993.002µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.264013  106211 cacher.go:648] cacher (*rbac.ClusterRole): 1 objects queued in incoming channel.
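
Note: the cacher message above comes from the watch cache: incoming store events are queued on a buffered channel and drained by a dispatcher that fans them out to watchers, and when the producer runs ahead the cacher logs how many objects are waiting. A toy sketch of that producer/dispatcher shape (types and sizes are illustrative only):

package main

import (
	"fmt"
	"time"
)

// event stands in for a watch event arriving from storage.
type event struct{ key string }

func main() {
	incoming := make(chan event, 100) // the cacher's buffered incoming channel

	// Dispatcher: drains the queue and would deliver to watchers.
	go func() {
		for ev := range incoming {
			_ = ev // deliver to watchers here
			time.Sleep(time.Millisecond)
		}
	}()

	// Producer: when it outpaces the dispatcher, items pile up in the buffer,
	// which is what "N objects queued in incoming channel" reports.
	for i := 0; i < 10; i++ {
		incoming <- event{key: fmt.Sprintf("/clusterroles/%d", i)}
	}
	fmt.Printf("%d objects queued in incoming channel\n", len(incoming))
	close(incoming)
}
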
I1109 03:38:49.264485  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.821539ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.264946  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:deployment-controller
I1109 03:38:49.266978  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:disruption-controller: (1.074823ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.269844  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.089086ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.270130  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:disruption-controller
I1109 03:38:49.271503  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:endpoint-controller: (1.049762ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.273204  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.343533ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.273467  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:endpoint-controller
I1109 03:38:49.274727  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:expand-controller: (992.486µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.277551  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.167013ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.277783  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:expand-controller
I1109 03:38:49.278923  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:generic-garbage-collector: (833.407µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.281095  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.699213ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.281421  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I1109 03:38:49.282447  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:horizontal-pod-autoscaler: (771.854µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.284282  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.42614ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.284583  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I1109 03:38:49.285720  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:job-controller: (944.795µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.288466  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.34754ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.288678  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:job-controller
I1109 03:38:49.289828  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:namespace-controller: (914.382µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.293010  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.799052ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.293173  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:namespace-controller
I1109 03:38:49.294603  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:node-controller: (1.136607ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.297080  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.097808ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.297924  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:node-controller
I1109 03:38:49.299133  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:persistent-volume-binder: (928.846µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.301623  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.957607ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.301932  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I1109 03:38:49.303063  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pod-garbage-collector: (917.407µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.305341  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.701512ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.305550  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I1109 03:38:49.306533  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replicaset-controller: (827.873µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.308696  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.75131ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.308909  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:replicaset-controller
I1109 03:38:49.310117  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replication-controller: (1.026207ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.311045  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.311384  106211 wrap.go:47] GET /healthz: (1.012264ms) 500
goroutine 29567 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e610cb0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e610cb0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00dcc7700, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e3424a0, 0xc00e9a8280, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e3424a0, 0xc003a02e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e3424a0, 0xc003a02d00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e3424a0, 0xc003a02d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc001c623c0, 0xc00edd15a0, 0x75ce240, 0xc00e3424a0, 0xc003a02d00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:49.312407  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.728788ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.312591  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:replication-controller
I1109 03:38:49.313609  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:resourcequota-controller: (878.841µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.315626  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.593861ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.315890  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I1109 03:38:49.316989  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:route-controller: (867.894µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.319029  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.472146ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.319293  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:route-controller
I1109 03:38:49.320568  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-account-controller: (1.034588ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.321109  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.321292  106211 wrap.go:47] GET /healthz: (892.861µs) 500
goroutine 29880 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e134540, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e134540, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00dc851c0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a41c0, 0xc00363c8c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a41c0, 0xc0033d9c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a41c0, 0xc0033d9b00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a41c0, 0xc0033d9b00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc004066900, 0xc00edd15a0, 0x75ce240, 0xc0039a41c0, 0xc0033d9b00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.322970  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.003932ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.323197  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:service-account-controller
I1109 03:38:49.324304  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-controller: (873.595µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.326076  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.323787ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.326455  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:service-controller
I1109 03:38:49.327675  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:statefulset-controller: (930.795µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.329553  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.560258ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.331000  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:statefulset-controller
I1109 03:38:49.332025  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:ttl-controller: (684.221µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.333920  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.451547ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.334120  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:ttl-controller
I1109 03:38:49.351952  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:certificate-controller: (1.749599ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.372696  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.730222ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.372934  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:certificate-controller
I1109 03:38:49.391668  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pvc-protection-controller: (1.599575ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.413672  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.414105  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.088358ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.414198  106211 wrap.go:47] GET /healthz: (3.723812ms) 500
goroutine 29903 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e1253b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e1253b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00da81a80, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e3428c8, 0xc00b86a8c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e3428c8, 0xc00346d900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e3428c8, 0xc00346d800)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e3428c8, 0xc00346d800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc005ba75c0, 0xc00edd15a0, 0x75ce240, 0xc00e3428c8, 0xc00346d800)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:49.414559  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I1109 03:38:49.421858  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.422278  106211 wrap.go:47] GET /healthz: (1.78093ms) 500
goroutine 29868 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0fe1c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0fe1c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00da2e200, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e590768, 0xc00e9a8640, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e590768, 0xc003e78f00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e590768, 0xc003e78f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e590768, 0xc003e78f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e590768, 0xc003e78f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e590768, 0xc003e78f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e590768, 0xc003e78f00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e590768, 0xc003e78f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e590768, 0xc003e78f00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e590768, 0xc003e78f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e590768, 0xc003e78f00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e590768, 0xc003e78f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e590768, 0xc003e78e00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e590768, 0xc003e78e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc005964d20, 0xc00edd15a0, 0x75ce240, 0xc00e590768, 0xc003e78e00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.431745  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pv-protection-controller: (1.526025ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.453177  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.142209ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.453442  106211 storage_rbac.go:195] created clusterrole.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I1109 03:38:49.472426  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/cluster-admin: (1.394464ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.494462  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.246776ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.494732  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/cluster-admin
I1109 03:38:49.511896  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.512539  106211 wrap.go:47] GET /healthz: (2.095345ms) 500
goroutine 29873 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0fe5b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0fe5b0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00da2f000, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e590848, 0xc00d6983c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e590848, 0xc003e79e00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e590848, 0xc003e79e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e590848, 0xc003e79e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e590848, 0xc003e79e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e590848, 0xc003e79e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e590848, 0xc003e79e00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e590848, 0xc003e79e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e590848, 0xc003e79e00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e590848, 0xc003e79e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e590848, 0xc003e79e00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e590848, 0xc003e79e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e590848, 0xc003e79d00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e590848, 0xc003e79d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0060c75c0, 0xc00edd15a0, 0x75ce240, 0xc00e590848, 0xc003e79d00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:49.513367  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (2.543958ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.521575  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.521891  106211 wrap.go:47] GET /healthz: (1.491026ms) 500
goroutine 29939 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0fe690, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0fe690, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00da2f1e0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e590858, 0xc002b02a00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e590858, 0xc0057ee400)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e590858, 0xc0057ee400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e590858, 0xc0057ee400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e590858, 0xc0057ee400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e590858, 0xc0057ee400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e590858, 0xc0057ee400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e590858, 0xc0057ee400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e590858, 0xc0057ee400)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e590858, 0xc0057ee400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e590858, 0xc0057ee400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e590858, 0xc0057ee400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e590858, 0xc0057ee100)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e590858, 0xc0057ee100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0060f1440, 0xc00edd15a0, 0x75ce240, 0xc00e590858, 0xc0057ee100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
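Read bottom-up, the trace shows the request entering at the outermost timeout handler, then passing through WithAuthentication, WithImpersonation, WithMaxInFlightLimit, and WithAuthorization before the PathRecorderMux dispatches it to the healthz handler. A hedged sketch of how such a filter chain composes with plain net/http (the filter names and port are placeholders, not the apiserver's real constructors):

```go
package main

import (
	"log"
	"net/http"
)

// withFilter wraps a handler so the filter runs before its inner handler.
// Because the chain is built inside-out, a stack trace lists the outermost
// wrapper (here the timeout stand-in) closest to the goroutine's origin.
func withFilter(name string, inner http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Printf("filter %s: %s %s", name, r.Method, r.URL.Path)
		inner.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Wrap innermost first, mirroring the order visible in the traces:
	// authorization sits closest to the mux, the timeout wrapper outermost.
	var h http.Handler = mux
	for _, name := range []string{"authorization", "maxinflight", "impersonation", "authentication", "timeout"} {
		h = withFilter(name, h)
	}
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", h))
}
```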
I1109 03:38:49.532415  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.290401ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.532649  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:discovery
I1109 03:38:49.552038  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:basic-user: (1.951102ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.573857  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.480528ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.574100  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:basic-user
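Each GET that 404s followed by a POST that returns 201 is the bootstrap reconciler ensuring a default binding exists: look it up by name and create it only on NotFound. A rough client-go sketch of that get-or-create step, assuming a recent client-go (the kubeconfig path and the binding's subjects are illustrative):

```go
package main

import (
	"context"
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// ensureClusterRoleBinding mirrors the GET-then-POST pattern in the log:
// a 404 on GET is followed by a POST that yields 201 Created.
func ensureClusterRoleBinding(ctx context.Context, cs kubernetes.Interface, crb *rbacv1.ClusterRoleBinding) error {
	_, err := cs.RbacV1().ClusterRoleBindings().Get(ctx, crb.Name, metav1.GetOptions{})
	if err == nil {
		return nil // already present, nothing to do
	}
	if !apierrors.IsNotFound(err) {
		return err
	}
	_, err = cs.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{})
	return err
}

func main() {
	// Illustrative kubeconfig path; in-cluster config would also work.
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(config)
	crb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "system:basic-user"},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     "system:basic-user",
		},
		Subjects: []rbacv1.Subject{{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "Group",
			Name:     "system:authenticated",
		}},
	}
	fmt.Println(ensureClusterRoleBinding(context.Background(), cs, crb))
}
```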
I1109 03:38:49.592508  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:public-info-viewer: (1.544168ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.613813  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.65186ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.614113  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:public-info-viewer
I1109 03:38:49.614399  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.614678  106211 wrap.go:47] GET /healthz: (4.066873ms) 500
goroutine 29943 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:49.621502  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.621683  106211 wrap.go:47] GET /healthz: (1.388484ms) 500
goroutine 29913 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.631452  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node-proxier: (1.503945ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.652916  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.876665ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.653724  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:node-proxier
I1109 03:38:49.672275  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-controller-manager: (2.130614ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.692844  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.769585ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.693093  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-controller-manager
I1109 03:38:49.711715  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.712641  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-dns: (1.393639ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.713502  106211 wrap.go:47] GET /healthz: (2.560945ms) 500
goroutine 29971 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:49.721561  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.721731  106211 wrap.go:47] GET /healthz: (1.394191ms) 500
goroutine 29957 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.732758  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.835919ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.733024  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-dns
I1109 03:38:49.751896  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-scheduler: (1.956047ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.773072  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.059796ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.773582  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-scheduler
I1109 03:38:49.791820  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:aws-cloud-provider: (1.68057ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.812362  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.812559  106211 wrap.go:47] GET /healthz: (1.887837ms) 500
goroutine 29919 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:49.813777  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.078359ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.814143  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:aws-cloud-provider
I1109 03:38:49.821518  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.821776  106211 wrap.go:47] GET /healthz: (1.520301ms) 500
goroutine 29950 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.831930  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:volume-scheduler: (1.838867ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.853147  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.242428ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.853633  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:volume-scheduler
I1109 03:38:49.872191  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node: (2.018933ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.893196  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.751752ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.893712  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:node
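The "failed: reason withheld" lines show healthz treating each post-start hook as a named check whose detail is hidden from this client; the check keeps failing, and /healthz keeps answering 500, until the hook's goroutine finishes. A self-contained sketch of that gating idea using plain net/http and an atomic flag (a reimplementation for illustration, not the apiserver's healthz package):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"sync/atomic"
	"time"
)

func main() {
	var bootstrapDone atomic.Bool

	// Stand-in for the rbac/bootstrap-roles post-start hook: it runs
	// asynchronously, and the healthz check fails until it completes.
	go func() {
		time.Sleep(2 * time.Second) // pretend to seed default roles/bindings
		bootstrapDone.Store(true)
	}()

	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		if !bootstrapDone.Load() {
			// Mirror the log: name the failing check but withhold detail.
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprint(w, "[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\nhealthz check failed\n")
			return
		}
		fmt.Fprint(w, "ok")
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```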
I1109 03:38:49.912146  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.912233  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:attachdetach-controller: (2.126651ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:49.912622  106211 wrap.go:47] GET /healthz: (2.234391ms) 500
goroutine 29975 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:49.921764  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:49.921987  106211 wrap.go:47] GET /healthz: (1.638816ms) 500
goroutine 29921 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.933496  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.394605ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.934150  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:attachdetach-controller
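From here the reconciler walks the per-controller bindings, one system:controller:* identity per controller-manager loop. A short client-go sketch that lists those bindings once bootstrap is done, under the same assumptions as the earlier sketch (the prefix filter runs client-side because the API has no name-prefix selector):

```go
package main

import (
	"context"
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path, as in the earlier sketch.
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(config)

	// List every ClusterRoleBinding and keep the per-controller ones.
	crbs, err := cs.RbacV1().ClusterRoleBindings().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, crb := range crbs.Items {
		if strings.HasPrefix(crb.Name, "system:controller:") {
			fmt.Println(crb.Name)
		}
	}
}
```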
I1109 03:38:49.951730  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:clusterrole-aggregation-controller: (1.64737ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.974740  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.658789ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:49.975049  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I1109 03:38:49.991776  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:cronjob-controller: (1.768227ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.012113  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.012316  106211 wrap.go:47] GET /healthz: (1.70685ms) 500
goroutine 29996 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:50.013562  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.541902ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.013813  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:cronjob-controller
I1109 03:38:50.021498  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.021672  106211 wrap.go:47] GET /healthz: (1.283362ms) 500
goroutine 29984 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.031454  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:daemon-set-controller: (1.442976ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.053185  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.200622ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.053608  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I1109 03:38:50.071811  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:deployment-controller: (1.706008ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.093369  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.29986ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.093731  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:deployment-controller
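Every wrap.go:47 line records method, path, latency, status, user agent, and client address; capturing the status code is what httplog.(*respLogger).recordStatus does in the traces above, and it requires wrapping the ResponseWriter. A rough sketch of such an access-log middleware (the output format and address only approximate what this log shows):

```go
package main

import (
	"log"
	"net/http"
	"time"
)

// statusRecorder captures the status code written by the inner handler,
// the same job httplog.(*respLogger).recordStatus performs in the traces.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

// withAccessLog emits one line per request in roughly the wrap.go format:
// METHOD /path: (latency) status [user-agent remote-addr].
func withAccessLog(inner http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
		start := time.Now()
		inner.ServeHTTP(rec, req)
		log.Printf("%s %s: (%v) %d [%s %s]",
			req.Method, req.URL.Path, time.Since(start), rec.status,
			req.UserAgent(), req.RemoteAddr)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", withAccessLog(mux)))
}
```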
I1109 03:38:50.112227  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:disruption-controller: (1.371509ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.113480  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.113681  106211 wrap.go:47] GET /healthz: (1.15217ms) 500
goroutine 29848 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:50.124094  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.124429  106211 wrap.go:47] GET /healthz: (2.985484ms) 500
goroutine 29998 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.131953  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.030829ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.132671  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:disruption-controller
I1109 03:38:50.151188  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:endpoint-controller: (1.2221ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.172040  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.083234ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.172327  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:endpoint-controller
I1109 03:38:50.191169  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:expand-controller: (1.262557ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.212373  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.39371ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.212746  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:expand-controller
I1109 03:38:50.214166  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.214351  106211 wrap.go:47] GET /healthz: (2.221716ms) 500
goroutine 29935 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:50.221646  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.221770  106211 wrap.go:47] GET /healthz: (1.550304ms) 500
goroutine 30021 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.231675  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:generic-garbage-collector: (1.729014ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.253201  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.745183ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.253485  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I1109 03:38:50.271575  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:horizontal-pod-autoscaler: (1.603681ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.294900  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.277527ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.295153  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I1109 03:38:50.312509  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:job-controller: (2.374333ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.312651  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.312798  106211 wrap.go:47] GET /healthz: (2.118602ms) 500
goroutine 30068 [running]:
[stack frames identical to goroutine 29939 above; only the pointer arguments differ]

logging error output: (same healthz failure body as above)
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:50.321348  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.321523  106211 wrap.go:47] GET /healthz: (1.24859ms) 500
goroutine 29968 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0cd7a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0cd7a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a491360, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a4e60, 0xc00549eb40, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a4e60, 0xc00c1b2200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a4e60, 0xc00c1b2100)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a4e60, 0xc00c1b2100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0053f8c60, 0xc00edd15a0, 0x75ce240, 0xc0039a4e60, 0xc00c1b2100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
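
Each goroutine dump walks the same delegating filter chain: read bottom-up, a request passes through the timeout filter, WithAuthentication, WithImpersonation, WithMaxInFlightLimit, and WithAuthorization before the mux routes it to handleRootHealthz. A minimal sketch of that wrapping pattern with plain net/http; the filter names here are illustrative stand-ins, not the apiserver's implementations:

package main

import (
	"log"
	"net/http"
)

// logStep stands in for one filter (authn, impersonation, max-in-flight,
// authz); each wraps the next handler, which is why the goroutine dumps
// show the chain as nested ServeHTTP calls.
func logStep(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Printf("filter %s: %s %s", name, r.Method, r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	// Wrap innermost handler first, outermost filter last -- the reverse of
	// the order a request traverses, matching the traces read bottom-up.
	var h http.Handler = logStep("authorization", mux)
	h = logStep("max-in-flight", h)
	h = logStep("impersonation", h)
	h = logStep("authentication", h)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", h))
}
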
I1109 03:38:50.332707  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.401094ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.333113  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:job-controller
I1109 03:38:50.351091  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:namespace-controller: (1.010749ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.372911  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.905037ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.373364  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:namespace-controller
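
The clusterrolebinding lines show the reconciliation loop behind the poststarthook: storage_rbac GETs each default binding and, on a 404, POSTs it, after which the 201 and the "created clusterrolebinding..." line follow. A minimal sketch of that get-or-create step using client-go; it is written against a current client-go signature with context arguments, which the 1.14-era vendor tree in this run predates:

package rbacbootstrap

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureClusterRoleBinding reproduces the GET -> 404 -> POST -> 201 sequence:
// create the binding only when the read says it does not exist yet.
func ensureClusterRoleBinding(ctx context.Context, cs kubernetes.Interface, crb *rbacv1.ClusterRoleBinding) error {
	_, err := cs.RbacV1().ClusterRoleBindings().Get(ctx, crb.Name, metav1.GetOptions{})
	if err == nil {
		return nil // already present; nothing to reconcile
	}
	if !apierrors.IsNotFound(err) {
		return err // a real failure, not just "missing"
	}
	_, err = cs.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{})
	return err
}

The same pattern repeats below for namespaced roles and rolebindings in kube-system and kube-public; only once every default object exists does the rbac/bootstrap-roles check flip to [+] and /healthz stop returning 500.
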
I1109 03:38:50.391604  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:node-controller: (1.696407ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.412665  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.658234ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.412944  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.413136  106211 wrap.go:47] GET /healthz: (2.081308ms) 500
goroutine 30082 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0cdc00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0cdc00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a258040, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a4f30, 0xc0039ca000, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a4f30, 0xc00c1b2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a4f30, 0xc00c1b2c00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a4f30, 0xc00c1b2c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0053f9320, 0xc00edd15a0, 0x75ce240, 0xc0039a4f30, 0xc00c1b2c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:50.413480  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:node-controller
I1109 03:38:50.421165  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.421404  106211 wrap.go:47] GET /healthz: (1.155225ms) 500
goroutine 30075 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e06eaf0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e06eaf0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a277380, 0x1f4)
net/http.Error(0x7fb280505648, 0xc009ef8f90, 0xc00549ef00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc009ef8f90, 0xc00c41fd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc009ef8f90, 0xc00c41fc00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc009ef8f90, 0xc00c41fc00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc007912720, 0xc00edd15a0, 0x75ce240, 0xc009ef8f90, 0xc00c41fc00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.433266  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:persistent-volume-binder: (3.276803ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.453774  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.81704ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.454016  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I1109 03:38:50.473069  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pod-garbage-collector: (3.109941ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.492018  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.099729ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.492763  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I1109 03:38:50.511621  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replicaset-controller: (1.607558ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.512376  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.512504  106211 wrap.go:47] GET /healthz: (2.033049ms) 500
goroutine 30087 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0662a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0662a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a259760, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a5070, 0xc0039cb400, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a5070, 0xc00b6cc100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a5070, 0xc00b6cc000)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a5070, 0xc00b6cc000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc004f094a0, 0xc00edd15a0, 0x75ce240, 0xc0039a5070, 0xc00b6cc000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:50.521614  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.521928  106211 wrap.go:47] GET /healthz: (1.350353ms) 500
goroutine 30098 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0b3e30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0b3e30, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a323b80, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00a113bf8, 0xc0039cb900, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00a113bf8, 0xc006e7be00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00a113bf8, 0xc006e7bd00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00a113bf8, 0xc006e7bd00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00592f320, 0xc00edd15a0, 0x75ce240, 0xc00a113bf8, 0xc006e7bd00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.533551  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.617252ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.533759  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replicaset-controller
I1109 03:38:50.551854  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replication-controller: (1.769472ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.572855  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.669032ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.574313  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replication-controller
I1109 03:38:50.592894  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:resourcequota-controller: (2.21604ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.611122  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.611352  106211 wrap.go:47] GET /healthz: (1.002446ms) 500
goroutine 30047 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0792d0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0792d0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a204140, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e5911a0, 0xc00549f7c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e5911a0, 0xc00c2d6d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e5911a0, 0xc00c2d6c00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e5911a0, 0xc00c2d6c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc007797200, 0xc00edd15a0, 0x75ce240, 0xc00e5911a0, 0xc00c2d6c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:50.613204  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.296503ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.613541  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I1109 03:38:50.621304  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.621532  106211 wrap.go:47] GET /healthz: (1.338076ms) 500
goroutine 30079 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e06f880, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e06f880, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a1defe0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc009ef9140, 0xc00b86b040, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc009ef9140, 0xc00b8c6100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc009ef9140, 0xc00b8c6000)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc009ef9140, 0xc00b8c6000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0051463c0, 0xc00edd15a0, 0x75ce240, 0xc009ef9140, 0xc00b8c6000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.631579  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:route-controller: (1.632696ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.655573  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.356377ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.655806  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:route-controller
I1109 03:38:50.671868  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-account-controller: (1.67679ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.693158  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.169293ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.693683  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-account-controller
I1109 03:38:50.711372  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-controller: (1.379443ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:50.712038  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.712213  106211 wrap.go:47] GET /healthz: (1.83446ms) 500
goroutine 30092 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e066540, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e066540, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a122500, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a5138, 0xc0004997c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a5138, 0xc00b6cd700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a5138, 0xc00b6cd600)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a5138, 0xc00b6cd600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc005143bc0, 0xc00edd15a0, 0x75ce240, 0xc0039a5138, 0xc00b6cd600)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:50.721156  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.721368  106211 wrap.go:47] GET /healthz: (1.180131ms) 500
goroutine 30094 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e066620, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e066620, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a122760, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a5150, 0xc002ebac80, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a5150, 0xc00b6cdb00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a5150, 0xc00b6cda00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a5150, 0xc00b6cda00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc005143e60, 0xc00edd15a0, 0x75ce240, 0xc0039a5150, 0xc00b6cda00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.732323  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.258502ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.733541  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-controller
I1109 03:38:50.751133  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:statefulset-controller: (1.28561ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.773727  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.601543ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.774040  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:statefulset-controller
I1109 03:38:50.793027  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:ttl-controller: (2.040583ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.811508  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.811711  106211 wrap.go:47] GET /healthz: (1.317136ms) 500
goroutine 30112 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e053570, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e053570, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a0751e0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00a113f88, 0xc00363cc80, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00a113f88, 0xc003f91400)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00a113f88, 0xc003f91400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00a113f88, 0xc003f91400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00a113f88, 0xc003f91400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00a113f88, 0xc003f91400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00a113f88, 0xc003f91400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00a113f88, 0xc003f91400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00a113f88, 0xc003f91400)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00a113f88, 0xc003f91400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00a113f88, 0xc003f91400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00a113f88, 0xc003f91400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00a113f88, 0xc003f91300)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00a113f88, 0xc003f91300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc005055380, 0xc00edd15a0, 0x75ce240, 0xc00a113f88, 0xc003f91300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:50.812560  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.489668ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.812798  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:ttl-controller
I1109 03:38:50.821165  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.821572  106211 wrap.go:47] GET /healthz: (1.361278ms) 500
goroutine 30117 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e02c1c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e02c1c0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a08d160, 0x1f4)
net/http.Error(0x7fb280505648, 0xc009ef92b0, 0xc00b86b680, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc009ef92b0, 0xc00464b000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc009ef92b0, 0xc00464af00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc009ef92b0, 0xc00464af00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0051cee40, 0xc00edd15a0, 0x75ce240, 0xc009ef92b0, 0xc00464af00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.831508  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:certificate-controller: (1.562464ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.854711  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.020239ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.854999  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:certificate-controller
I1109 03:38:50.873419  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pvc-protection-controller: (3.352478ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.892765  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.775974ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.892983  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I1109 03:38:50.911477  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.911652  106211 wrap.go:47] GET /healthz: (1.27576ms) 500
goroutine 30049 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0797a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0797a0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc00a2051a0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e5912d0, 0xc00549fcc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e5912d0, 0xc00c2d7800)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e5912d0, 0xc00c2d7700)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e5912d0, 0xc00c2d7700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc007797f20, 0xc00edd15a0, 0x75ce240, 0xc00e5912d0, 0xc00c2d7700)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:50.915079  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pv-protection-controller: (4.987332ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.921480  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:50.921656  106211 wrap.go:47] GET /healthz: (1.45744ms) 500
goroutine 30185 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e03fc00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e03fc00, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009fa3ca0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0009bbc50, 0xc001e12a00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0009bbc50, 0xc0059f5700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0009bbc50, 0xc0059f5600)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0009bbc50, 0xc0059f5600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0054d7d40, 0xc00edd15a0, 0x75ce240, 0xc0009bbc50, 0xc0059f5600)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.936076  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.996939ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.936537  106211 storage_rbac.go:223] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I1109 03:38:50.951441  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/system:controller:bootstrap-signer: (1.528708ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.953087  106211 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.260389ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.972632  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles: (2.48748ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.972910  106211 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I1109 03:38:50.991838  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/extension-apiserver-authentication-reader: (1.950345ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:50.994233  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.36514ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.012636  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.012904  106211 wrap.go:47] GET /healthz: (1.9977ms) 500
goroutine 30140 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e067f10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e067f10, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009eee160, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a5820, 0xc00363d180, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a5820, 0xc007850700)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a5820, 0xc007850700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a5820, 0xc007850700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a5820, 0xc007850700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a5820, 0xc007850700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a5820, 0xc007850700)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a5820, 0xc007850700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a5820, 0xc007850700)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a5820, 0xc007850700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a5820, 0xc007850700)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a5820, 0xc007850700)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a5820, 0xc007850100)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a5820, 0xc007850100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0057eaae0, 0xc00edd15a0, 0x75ce240, 0xc0039a5820, 0xc007850100)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:51.014057  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (4.051029ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.014317  106211 storage_rbac.go:254] created role.rbac.authorization.k8s.io/extension-apiserver-authentication-reader in kube-system
I1109 03:38:51.021051  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.021417  106211 wrap.go:47] GET /healthz: (1.273342ms) 500
goroutine 30164 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e00ca80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e00ca80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009f79ee0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc008e681e0, 0xc001e12f00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc008e681e0, 0xc006fe2d00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc008e681e0, 0xc006fe2900)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc008e681e0, 0xc006fe2900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc005055da0, 0xc00edd15a0, 0x75ce240, 0xc008e681e0, 0xc006fe2900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.031095  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:bootstrap-signer: (1.167816ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.032804  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.30641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.054664  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (4.641448ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.054915  106211 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I1109 03:38:51.071227  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:cloud-provider: (1.193838ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.073464  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.787163ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.093683  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (3.478425ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.093912  106211 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I1109 03:38:51.115351  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.115486  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:token-cleaner: (5.315062ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.115519  106211 wrap.go:47] GET /healthz: (4.998279ms) 500
goroutine 30210 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00dfdab60, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00dfdab60, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009eefc40, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a5a00, 0xc00b86be00, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a5a00, 0xc010550300)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a5a00, 0xc010550300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a5a00, 0xc010550300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a5a00, 0xc010550300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a5a00, 0xc010550300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a5a00, 0xc010550300)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a5a00, 0xc010550300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a5a00, 0xc010550300)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a5a00, 0xc010550300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a5a00, 0xc010550300)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a5a00, 0xc010550300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a5a00, 0xc010550200)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a5a00, 0xc010550200)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0059c8b40, 0xc00edd15a0, 0x75ce240, 0xc0039a5a00, 0xc010550200)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:51.117214  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.301642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.120949  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.121140  106211 wrap.go:47] GET /healthz: (915.499µs) 500
goroutine 30214 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00dfdb180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00dfdb180, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009e16960, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a5a58, 0xc001e132c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a5a58, 0xc010550e00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a5a58, 0xc010550c00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a5a58, 0xc010550c00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0059c9080, 0xc00edd15a0, 0x75ce240, 0xc0039a5a58, 0xc010550c00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.133576  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (3.73856ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.134009  106211 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I1109 03:38:51.153796  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-controller-manager: (3.007902ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.157786  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (3.600025ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.173575  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (3.57897ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.173977  106211 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I1109 03:38:51.191980  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-scheduler: (1.885944ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.194090  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.690869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.213706  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (3.749967ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.215129  106211 storage_rbac.go:254] created role.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I1109 03:38:51.215938  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.216105  106211 wrap.go:47] GET /healthz: (2.761762ms) 500
goroutine 30216 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00dfdb5e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00dfdb5e0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009e177c0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a5ae0, 0xc00363d7c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe100)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe000)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a5ae0, 0xc00b2fe000)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0059c9560, 0xc00edd15a0, 0x75ce240, 0xc0039a5ae0, 0xc00b2fe000)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40818]
I1109 03:38:51.221006  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.221171  106211 wrap.go:47] GET /healthz: (1.022139ms) 500
goroutine 30124 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e02ca80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e02ca80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009d22c20, 0x1f4)
net/http.Error(0x7fb280505648, 0xc009ef93d0, 0xc007be8280, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc009ef93d0, 0xc005f43600)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc009ef93d0, 0xc005f43500)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc009ef93d0, 0xc005f43500)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc009502240, 0xc00edd15a0, 0x75ce240, 0xc009ef93d0, 0xc005f43500)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.233491  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::extension-apiserver-authentication-reader: (3.465233ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.235843  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.95374ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.256629  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (6.633228ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.256863  106211 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system::extension-apiserver-authentication-reader in kube-system
I1109 03:38:51.274062  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-controller-manager: (2.719504ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.275746  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.202427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.292075  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.101209ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.292428  106211 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I1109 03:38:51.312263  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-scheduler: (2.228078ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.312394  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.312551  106211 wrap.go:47] GET /healthz: (1.94551ms) 500
goroutine 30242 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e02d8f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e02d8f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009ca4740, 0x1f4)
net/http.Error(0x7fb280505648, 0xc009ef94f8, 0xc002ebb2c0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc009ef94f8, 0xc0122e8400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc009ef94f8, 0xc0122e8300)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc009ef94f8, 0xc0122e8300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc009503140, 0xc00edd15a0, 0x75ce240, 0xc009ef94f8, 0xc0122e8300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:51.314215  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.233677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.321060  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.321260  106211 wrap.go:47] GET /healthz: (1.066105ms) 500
goroutine 30028 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e0898f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e0898f0, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009c3e2e0, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e19b5b0, 0xc00e9a9cc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e19b5b0, 0xc009942a00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e19b5b0, 0xc009942900)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e19b5b0, 0xc009942900)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc006fa91a0, 0xc00edd15a0, 0x75ce240, 0xc00e19b5b0, 0xc009942900)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.333667  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (3.587782ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.333926  106211 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I1109 03:38:51.353679  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:bootstrap-signer: (3.481553ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.356840  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (2.746484ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.372378  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.249774ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.372648  106211 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I1109 03:38:51.391691  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:cloud-provider: (1.743774ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.393332  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.177607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.412124  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.412334  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.206703ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.412384  106211 wrap.go:47] GET /healthz: (1.959713ms) 500
goroutine 30224 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00dfbc700, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00dfbc700, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009c2c340, 0x1f4)
net/http.Error(0x7fb280505648, 0xc0039a5d08, 0xc004d84140, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc0039a5d08, 0xc0126bb400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc0039a5d08, 0xc011869f00)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc0039a5d08, 0xc011869f00)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc009a50a20, 0xc00edd15a0, 0x75ce240, 0xc0039a5d08, 0xc011869f00)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [Go-http-client/1.1 127.0.0.1:40816]
I1109 03:38:51.412582  106211 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I1109 03:38:51.421057  106211 healthz.go:161] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 03:38:51.421336  106211 wrap.go:47] GET /healthz: (1.138204ms) 500
goroutine 30290 [running]:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).recordStatus(0xc00e01ff80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog.(*respLogger).WriteHeader(0xc00e01ff80, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*baseTimeoutWriter).WriteHeader(0xc009bda660, 0x1f4)
net/http.Error(0x7fb280505648, 0xc00e343348, 0xc000499cc0, 0x136, 0x1f4)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz.handleRootHealthz.func1(0x7fb280505648, 0xc00e343348, 0xc011a13400)
net/http.HandlerFunc.ServeHTTP(0xc006f30f40, 0x7fb280505648, 0xc00e343348, 0xc011a13400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*pathHandler).ServeHTTP(0xc006cfb8c0, 0x7fb280505648, 0xc00e343348, 0xc011a13400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/mux.(*PathRecorderMux).ServeHTTP(0xc00db5a690, 0x7fb280505648, 0xc00e343348, 0xc011a13400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4510e98, 0xe, 0xc01056af30, 0xc00db5a690, 0x7fb280505648, 0xc00e343348, 0xc011a13400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7fb280505648, 0xc00e343348, 0xc011a13400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb00, 0x7fb280505648, 0xc00e343348, 0xc011a13400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func1(0x7fb280505648, 0xc00e343348, 0xc011a13400)
net/http.HandlerFunc.ServeHTTP(0xc00edcdef0, 0x7fb280505648, 0xc00e343348, 0xc011a13400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7fb280505648, 0xc00e343348, 0xc011a13400)
net/http.HandlerFunc.ServeHTTP(0xc00edceb40, 0x7fb280505648, 0xc00e343348, 0xc011a13400)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7fb280505648, 0xc00e343348, 0xc011a13300)
net/http.HandlerFunc.ServeHTTP(0xc0105c2730, 0x7fb280505648, 0xc00e343348, 0xc011a13300)
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc00b2f66c0, 0xc00edd15a0, 0x75ce240, 0xc00e343348, 0xc011a13300)
created by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP

logging error output: "[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/ca-registration ok\nhealthz check failed\n"
 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.431227  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:token-cleaner: (1.308196ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.433055  106211 wrap.go:47] GET /api/v1/namespaces/kube-system: (1.279869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.452357  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.399882ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.452584  106211 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I1109 03:38:51.471380  106211 wrap.go:47] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/system:controller:bootstrap-signer: (1.374241ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.473229  106211 wrap.go:47] GET /api/v1/namespaces/kube-public: (1.44572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.491948  106211 wrap.go:47] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (1.960874ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.492347  106211 storage_rbac.go:284] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I1109 03:38:51.511866  106211 wrap.go:47] GET /healthz: (1.344829ms) 200 [Go-http-client/1.1 127.0.0.1:40816]
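
Note: the burst of GET /healthz 500s above all fails the same check — the rbac/bootstrap-roles poststarthook is still running, so healthz.go logs the real reason ("not finished") server-side while the HTTP body only reports "reason withheld". The 200 on this line means bootstrap finished and the test can proceed. A minimal sketch of waiting for that transition (base URL, timeout, and helper name are assumptions, not taken from this job):

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "time"
    )

    // waitForHealthz polls /healthz until the apiserver returns 200 OK,
    // i.e. until every poststarthook (including rbac/bootstrap-roles) is done.
    // Hypothetical helper; the integration framework has its own equivalents.
    func waitForHealthz(base string, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            resp, err := http.Get(base + "/healthz")
            if err == nil {
                body, _ := ioutil.ReadAll(resp.Body)
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    return nil
                }
                // Body looks like the "[-]poststarthook/... failed: reason withheld" output above.
                fmt.Printf("healthz not ready yet: %s", body)
            }
            time.Sleep(100 * time.Millisecond)
        }
        return fmt.Errorf("healthz did not become ready within %v", timeout)
    }

    func main() {
        _ = waitForHealthz("http://127.0.0.1:8080", 30*time.Second)
    }
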
W1109 03:38:51.512688  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W1109 03:38:51.512998  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W1109 03:38:51.513065  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W1109 03:38:51.513088  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W1109 03:38:51.513113  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W1109 03:38:51.513154  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W1109 03:38:51.513256  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W1109 03:38:51.513299  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W1109 03:38:51.513323  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
W1109 03:38:51.513345  106211 mutation_detector.go:48] Mutation detector is enabled, this will result in memory leakage.
I1109 03:38:51.513449  106211 factory.go:331] Creating scheduler from algorithm provider 'DefaultProvider'
I1109 03:38:51.513488  106211 factory.go:412] Creating scheduler with fit predicates 'map[CheckNodeCondition:{} CheckNodeDiskPressure:{} CheckNodeMemoryPressure:{} CheckNodePIDPressure:{} CheckVolumeBinding:{} GeneralPredicates:{} MatchInterPodAffinity:{} MaxAzureDiskVolumeCount:{} MaxCSIVolumeCountPred:{} MaxEBSVolumeCount:{} MaxGCEPDVolumeCount:{} NoDiskConflict:{} NoVolumeZoneConflict:{} PodToleratesNodeTaints:{}]' and priority functions 'map[BalancedResourceAllocation:{} ImageLocalityPriority:{} InterPodAffinityPriority:{} LeastRequestedPriority:{} NodeAffinityPriority:{} NodePreferAvoidPodsPriority:{} SelectorSpreadPriority:{} TaintTolerationPriority:{}]'
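
Note: the two factory.go lines above enumerate the DefaultProvider's fit predicates (filters deciding whether a pod can run on a node at all) and priority functions (scores ranking the nodes that pass). The "Insufficient cpu, Insufficient memory" failures later in this log come from a resource-fit predicate of this kind. A toy sketch of the idea, not the real scheduler API:

    package main

    import "fmt"

    // Toy resource model; the real scheduler works with v1.ResourceList
    // and schedulernodeinfo.NodeInfo.
    type resources struct{ milliCPU, memoryBytes int64 }

    // fitsNode is a hypothetical stand-in for a fit predicate such as
    // GeneralPredicates: it reports whether the pod's requests fit the
    // node's remaining allocatable, with reasons mirroring the log.
    func fitsNode(request, allocatable resources) (bool, []string) {
        var reasons []string
        if request.milliCPU > allocatable.milliCPU {
            reasons = append(reasons, "Insufficient cpu")
        }
        if request.memoryBytes > allocatable.memoryBytes {
            reasons = append(reasons, "Insufficient memory")
        }
        return len(reasons) == 0, reasons
    }

    func main() {
        fit, reasons := fitsNode(resources{600, 512 << 20}, resources{500, 256 << 20})
        fmt.Println(fit, reasons) // false [Insufficient cpu Insufficient memory]
    }
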
I1109 03:38:51.513700  106211 controller_utils.go:1027] Waiting for caches to sync for scheduler controller
I1109 03:38:51.514045  106211 reflector.go:123] Starting reflector *v1.Pod (12h0m0s) from k8s.io/kubernetes/test/integration/scheduler/util.go:211
I1109 03:38:51.514087  106211 reflector.go:161] Listing and watching *v1.Pod from k8s.io/kubernetes/test/integration/scheduler/util.go:211
I1109 03:38:51.514959  106211 wrap.go:47] GET /api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: (582.48µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40816]
I1109 03:38:51.515859  106211 get.go:251] Starting watch for /api/v1/pods, rv=22506 labels= fields=status.phase!=Failed,status.phase!=Succeeded timeout=6m35s
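
Note: the watch above carries fieldSelector=status.phase!=Failed,status.phase!=Succeeded — the scheduler's pod reflector deliberately excludes terminal pods so they never occupy scheduler state. The selector itself is ordinary apimachinery syntax; a small standalone sketch of constructing it (not the test's own code):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/fields"
    )

    func main() {
        // Same selector as in the GET /api/v1/pods line above: drop pods
        // whose phase is Failed or Succeeded from the scheduler's view.
        sel, err := fields.ParseSelector("status.phase!=Failed,status.phase!=Succeeded")
        if err != nil {
            panic(err)
        }
        fmt.Println(sel.String())
    }
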
I1109 03:38:51.521535  106211 wrap.go:47] GET /healthz: (1.375081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.523020  106211 wrap.go:47] GET /api/v1/namespaces/default: (1.167683ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.525092  106211 wrap.go:47] POST /api/v1/namespaces: (1.663395ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.526396  106211 wrap.go:47] GET /api/v1/namespaces/default/services/kubernetes: (943.455µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.530439  106211 wrap.go:47] POST /api/v1/namespaces/default/services: (3.664406ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.531586  106211 wrap.go:47] GET /api/v1/namespaces/default/endpoints/kubernetes: (829.827µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.533301  106211 wrap.go:47] POST /api/v1/namespaces/default/endpoints: (1.413671ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.613891  106211 shared_informer.go:123] caches populated
I1109 03:38:51.613925  106211 controller_utils.go:1034] Caches are synced for scheduler controller
I1109 03:38:51.614471  106211 reflector.go:123] Starting reflector *v1.ReplicationController (1s) from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.614499  106211 reflector.go:161] Listing and watching *v1.ReplicationController from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.614589  106211 reflector.go:123] Starting reflector *v1.Node (1s) from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.614611  106211 reflector.go:161] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.614909  106211 reflector.go:123] Starting reflector *v1.StatefulSet (1s) from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.614931  106211 reflector.go:161] Listing and watching *v1.StatefulSet from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.615168  106211 reflector.go:123] Starting reflector *v1.StorageClass (1s) from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.615184  106211 reflector.go:161] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.615283  106211 reflector.go:123] Starting reflector *v1beta1.PodDisruptionBudget (1s) from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.615295  106211 reflector.go:161] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.615679  106211 wrap.go:47] GET /api/v1/replicationcontrollers?limit=500&resourceVersion=0: (675.541µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.615695  106211 reflector.go:123] Starting reflector *v1.ReplicaSet (1s) from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.615711  106211 reflector.go:161] Listing and watching *v1.ReplicaSet from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.616142  106211 wrap.go:47] GET /apis/apps/v1/statefulsets?limit=500&resourceVersion=0: (419.914µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40930]
I1109 03:38:51.616586  106211 wrap.go:47] GET /api/v1/nodes?limit=500&resourceVersion=0: (411.815µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40818]
I1109 03:38:51.616606  106211 wrap.go:47] GET /apis/apps/v1/replicasets?limit=500&resourceVersion=0: (419.531µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40936]
I1109 03:38:51.616621  106211 wrap.go:47] GET /apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: (389.562µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40932]
I1109 03:38:51.617205  106211 get.go:251] Starting watch for /api/v1/nodes, rv=22506 labels= fields= timeout=8m16s
I1109 03:38:51.617401  106211 wrap.go:47] GET /apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: (453.263µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40932]
I1109 03:38:51.617403  106211 get.go:251] Starting watch for /apis/storage.k8s.io/v1/storageclasses, rv=22507 labels= fields= timeout=9m49s
I1109 03:38:51.617558  106211 get.go:251] Starting watch for /api/v1/replicationcontrollers, rv=22506 labels= fields= timeout=9m52s
I1109 03:38:51.617766  106211 get.go:251] Starting watch for /apis/apps/v1/replicasets, rv=22507 labels= fields= timeout=5m9s
I1109 03:38:51.617966  106211 reflector.go:123] Starting reflector *v1.PersistentVolume (1s) from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.617978  106211 reflector.go:161] Listing and watching *v1.PersistentVolume from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.618034  106211 get.go:251] Starting watch for /apis/apps/v1/statefulsets, rv=22507 labels= fields= timeout=6m15s
I1109 03:38:51.618113  106211 reflector.go:123] Starting reflector *v1.PersistentVolumeClaim (1s) from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.618124  106211 reflector.go:161] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.618510  106211 wrap.go:47] GET /api/v1/persistentvolumes?limit=500&resourceVersion=0: (374.938µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40942]
I1109 03:38:51.618674  106211 wrap.go:47] GET /api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: (362.389µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40940]
I1109 03:38:51.619135  106211 get.go:251] Starting watch for /api/v1/persistentvolumes, rv=22506 labels= fields= timeout=8m15s
I1109 03:38:51.619297  106211 get.go:251] Starting watch for /api/v1/persistentvolumeclaims, rv=22506 labels= fields= timeout=9m17s
I1109 03:38:51.619808  106211 get.go:251] Starting watch for /apis/policy/v1beta1/poddisruptionbudgets, rv=22506 labels= fields= timeout=7m11s
I1109 03:38:51.620045  106211 reflector.go:123] Starting reflector *v1.Service (1s) from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.620065  106211 reflector.go:161] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:133
I1109 03:38:51.620844  106211 wrap.go:47] GET /api/v1/services?limit=500&resourceVersion=0: (476.1µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40934]
I1109 03:38:51.621743  106211 get.go:251] Starting watch for /api/v1/services, rv=22794 labels= fields= timeout=5m50s
I1109 03:38:51.714211  106211 shared_informer.go:123] caches populated
I1109 03:38:51.814534  106211 shared_informer.go:123] caches populated
I1109 03:38:51.914682  106211 shared_informer.go:123] caches populated
I1109 03:38:52.014927  106211 shared_informer.go:123] caches populated
I1109 03:38:52.115149  106211 shared_informer.go:123] caches populated
I1109 03:38:52.215356  106211 shared_informer.go:123] caches populated
I1109 03:38:52.315547  106211 shared_informer.go:123] caches populated
I1109 03:38:52.415736  106211 shared_informer.go:123] caches populated
I1109 03:38:52.515964  106211 shared_informer.go:123] caches populated
I1109 03:38:52.616155  106211 shared_informer.go:123] caches populated
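
Note: the reflector.go lines above are the scheduler's shared informers starting up (nodes, PVs, PVCs, services, and so on, each with the 1s resync visible in the "(1s)" suffix), and the repeated "caches populated" lines are the test gating on them. client-go exposes that gate as cache.WaitForCacheSync; a minimal sketch using a fake clientset so it runs without an apiserver:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes/fake"
        "k8s.io/client-go/tools/cache"
    )

    func main() {
        // Fake clientset: lets the sketch run with no real apiserver behind it.
        client := fake.NewSimpleClientset()

        // 1s resync period, matching the "(1s)" in the reflector log lines.
        factory := informers.NewSharedInformerFactory(client, time.Second)
        podInformer := factory.Core().V1().Pods().Informer()

        stopCh := make(chan struct{})
        defer close(stopCh)
        factory.Start(stopCh)

        // The equivalent of the "Caches are synced for scheduler controller" gate.
        if !cache.WaitForCacheSync(stopCh, podInformer.HasSynced) {
            fmt.Println("caches never synced")
            return
        }
        fmt.Println("caches populated")
    }
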
I1109 03:38:52.617223  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:52.617736  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:52.618929  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:52.619105  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:52.619127  106211 wrap.go:47] POST /api/v1/nodes: (2.396052ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.621526  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:52.622075  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.415902ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.622261  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0
I1109 03:38:52.622286  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0
I1109 03:38:52.622423  106211 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0", node "node1"
I1109 03:38:52.622445  106211 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I1109 03:38:52.622501  106211 factory.go:733] Attempting to bind rpod-0 to node1
I1109 03:38:52.624788  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.323214ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.624905  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0/binding: (1.909687ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41166]
I1109 03:38:52.625079  106211 scheduler.go:572] pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I1109 03:38:52.625594  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1
I1109 03:38:52.625606  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1
I1109 03:38:52.625681  106211 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1", node "node1"
I1109 03:38:52.625690  106211 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I1109 03:38:52.625725  106211 factory.go:733] Attempting to bind rpod-1 to node1
I1109 03:38:52.627182  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.748698ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41166]
I1109 03:38:52.627382  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1/binding: (1.509612ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.627968  106211 scheduler.go:572] pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I1109 03:38:52.629456  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.313605ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.727472  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0: (2.078105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.830283  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (1.950424ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
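
Note: rpod-0 and rpod-1 take the ordinary scheduling path above — AssumePodVolumes finds nothing to bind, "Attempting to bind" turns into a POST to the pod's binding subresource, then "bound successfully". That binding step is a plain API call; a hedged sketch against the release-1.14-era client-go signature (namespace and fake clientset are illustrative):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func main() {
        client := fake.NewSimpleClientset()

        // What "Attempting to bind rpod-0 to node1" amounts to: POST a
        // Binding object naming the target node to pods/rpod-0/binding.
        binding := &v1.Binding{
            ObjectMeta: metav1.ObjectMeta{Name: "rpod-0", Namespace: "test-ns"},
            Target:     v1.ObjectReference{Kind: "Node", Name: "node1"},
        }
        if err := client.CoreV1().Pods("test-ns").Bind(binding); err != nil {
            fmt.Println("bind failed:", err)
            return
        }
        fmt.Println("rpod-0 bound to node1")
    }
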
I1109 03:38:52.830589  106211 preemption_test.go:561] Creating the preemptor pod...
I1109 03:38:52.833276  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.339726ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.833331  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:52.833439  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:52.833586  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.833663  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.834178  106211 preemption_test.go:567] Creating additional pods...
I1109 03:38:52.837063  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.537687ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41174]
I1109 03:38:52.837727  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod/status: (3.404344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.839337  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.173584ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.839556  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
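
Note: here the preemptor does not fit ("0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory"), so generic_scheduler looks for nodes where evicting lower-priority pods would make room and nominates node1. What makes it a preemptor is simply a higher priority than the running rpods plus requests big enough to collide with them; an illustrative pod spec (values, names, and the directly-set Priority field are assumptions for this sketch — normally admission fills Priority from PriorityClassName):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        highPriority := int32(1000) // assumed: higher than the victims' priority
        pod := &v1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "preemptor-pod", Namespace: "test-ns"},
            Spec: v1.PodSpec{
                Priority: &highPriority,
                Containers: []v1.Container{{
                    Name:  "pause",
                    Image: "k8s.gcr.io/pause:3.1",
                    Resources: v1.ResourceRequirements{
                        Requests: v1.ResourceList{
                            v1.ResourceCPU:    resource.MustParse("500m"),
                            v1.ResourceMemory: resource.MustParse("500Mi"),
                        },
                    },
                }},
            },
        }
        fmt.Printf("%s requests cpu=%s memory=%s\n", pod.Name,
            pod.Spec.Containers[0].Resources.Requests.Cpu(),
            pod.Spec.Containers[0].Resources.Requests.Memory())
    }
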
I1109 03:38:52.841013  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.163629ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41172]
I1109 03:38:52.841390  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (6.885506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41166]
I1109 03:38:52.842593  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod/status: (2.724697ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.844893  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.989628ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41166]
I1109 03:38:52.847050  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.711939ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41166]
I1109 03:38:52.849515  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.084323ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41166]
I1109 03:38:52.850165  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (7.174369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.850632  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:52.850657  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:52.850812  106211 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod", node "node1"
I1109 03:38:52.850824  106211 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I1109 03:38:52.850871  106211 factory.go:733] Attempting to bind preemptor-pod to node1
I1109 03:38:52.851106  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:52.851119  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:52.851268  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.851305  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.852437  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.523633ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41166]
I1109 03:38:52.853553  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod/binding: (1.826672ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41174]
I1109 03:38:52.853855  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.16295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41166]
I1109 03:38:52.853912  106211 scheduler.go:572] pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I1109 03:38:52.854295  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0/status: (2.445395ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41176]
I1109 03:38:52.856213  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.286732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41176]
I1109 03:38:52.856341  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.922978ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41174]
I1109 03:38:52.856458  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.856650  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:52.856663  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:52.856748  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.856781  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.858737  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1/status: (1.724076ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41178]
I1109 03:38:52.858890  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.149068ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41176]
I1109 03:38:52.859101  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (1.772136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41180]
I1109 03:38:52.859722  106211 cacher.go:648] cacher (*core.Pod): 1 objects queued in incoming channel.
I1109 03:38:52.860453  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (1.078776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41178]
I1109 03:38:52.860684  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.860885  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:52.860922  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:52.861065  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.861173  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.862418  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (9.591018ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.862621  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.218377ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41176]
I1109 03:38:52.863065  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.650577ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41178]
I1109 03:38:52.864330  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2/status: (2.317984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41182]
I1109 03:38:52.865773  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.597502ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.866018  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.299888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41182]
I1109 03:38:52.866157  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.306595ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41176]
I1109 03:38:52.866995  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.867173  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:52.867188  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:52.867322  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.867368  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.868411  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.839425ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41182]
I1109 03:38:52.868600  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.159037ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.870207  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3/status: (2.025461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41184]
I1109 03:38:52.870910  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.931355ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.871600  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (1.003527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41184]
I1109 03:38:52.871953  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.872114  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:52.872138  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:52.872225  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.872279  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.873117  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.290392ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.873488  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (2.359781ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41178]
I1109 03:38:52.874361  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.79341ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41182]
I1109 03:38:52.875644  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4/status: (2.721396ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.875994  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (3.167057ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41184]
I1109 03:38:52.878265  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.730782ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41184]
I1109 03:38:52.878326  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (2.314299ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.878670  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.878806  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:52.878820  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:52.878880  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.878917  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.881028  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.434513ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41184]
I1109 03:38:52.881232  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5/status: (2.025838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.883402  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.007972ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41184]
I1109 03:38:52.883761  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (2.158934ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.884121  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.884275  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:52.884296  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:52.884371  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.884413  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.884547  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (5.339987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41178]
I1109 03:38:52.886077  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (12.573329ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.886128  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (1.029373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41178]
I1109 03:38:52.887875  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6/status: (3.076077ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.889232  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (923.793µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.889471  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.889937  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.439721ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:40946]
I1109 03:38:52.891587  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.208952ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.891995  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:52.892051  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:52.892156  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.892224  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.892537  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.445389ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41184]
I1109 03:38:52.894296  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.164746ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.895263  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7/status: (2.518065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41188]
I1109 03:38:52.896358  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.615649ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.896394  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (2.954047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41184]
I1109 03:38:52.896699  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.046515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41188]
I1109 03:38:52.897206  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.897420  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:52.897478  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:52.897581  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.897643  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.897989  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.188353ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41192]
I1109 03:38:52.901806  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (3.470335ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41190]
I1109 03:38:52.901846  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.044808ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41194]
I1109 03:38:52.902211  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8/status: (4.12513ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41186]
I1109 03:38:52.903983  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.580649ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41192]
I1109 03:38:52.907102  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (1.506961ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41194]
I1109 03:38:52.907578  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.726004ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41190]
I1109 03:38:52.908084  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.908297  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:52.908333  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:52.908419  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.908478  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.911460  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.222727ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41200]
I1109 03:38:52.911959  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.421194ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41190]
I1109 03:38:52.912367  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (2.647787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41198]
I1109 03:38:52.912791  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9/status: (3.599533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41194]
I1109 03:38:52.915644  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (2.01555ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41198]
I1109 03:38:52.916038  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.200607ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41190]
I1109 03:38:52.916620  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.916797  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:52.916811  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:52.916883  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.916914  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.919745  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (2.301719ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41202]
I1109 03:38:52.919767  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (2.168304ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41200]
I1109 03:38:52.919985  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.920451  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-3.15d561656013cb97: (2.600677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41204]
I1109 03:38:52.920570  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:52.920584  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:52.920668  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.920698  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.924499  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.647316ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41206]
I1109 03:38:52.925376  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10/status: (2.99087ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41200]
I1109 03:38:52.925905  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (9.310712ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41190]
I1109 03:38:52.927897  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (2.097563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41200]
I1109 03:38:52.928117  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.928283  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:52.928302  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:52.928368  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.928406  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.928672  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.064172ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41190]
I1109 03:38:52.931352  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.270901ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41190]
I1109 03:38:52.932220  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (9.364105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41202]
I1109 03:38:52.932695  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.236332ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41210]
I1109 03:38:52.933039  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (4.136914ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41206]
I1109 03:38:52.933420  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11/status: (4.240713ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41200]
I1109 03:38:52.935023  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (1.173838ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41206]
I1109 03:38:52.935332  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.935507  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:52.935520  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:52.935645  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.935771  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.936938  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.577062ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41190]
I1109 03:38:52.938477  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (1.446739ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41212]
I1109 03:38:52.938689  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (2.745199ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41206]
I1109 03:38:52.939955  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.940121  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-5.15d5616560c403c0: (3.890601ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41208]
I1109 03:38:52.940974  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.314622ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41190]
I1109 03:38:52.941464  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:52.941636  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:52.941788  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.941848  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.944475  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (2.01046ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41206]
I1109 03:38:52.944576  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.041158ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41208]
I1109 03:38:52.946385  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.885698ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41214]
I1109 03:38:52.946860  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.754719ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41206]
I1109 03:38:52.946939  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12/status: (1.963472ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41208]
I1109 03:38:52.948660  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (1.166418ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41206]
I1109 03:38:52.949060  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.949132  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.750721ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41208]
I1109 03:38:52.949323  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:52.949388  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:52.949607  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.949697  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.951993  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (2.05664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41216]
I1109 03:38:52.952334  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13/status: (1.986899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41214]
I1109 03:38:52.952356  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.358537ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41218]
I1109 03:38:52.953762  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (1.044788ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41214]
I1109 03:38:52.954030  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.954183  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:52.954195  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:52.954288  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.954316  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.955475  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.629171ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41216]
I1109 03:38:52.957752  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.543986ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41222]
I1109 03:38:52.958582  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (1.833362ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41220]
I1109 03:38:52.959166  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14/status: (2.201003ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41214]
I1109 03:38:52.960627  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (981.184µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41220]
I1109 03:38:52.960814  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.961375  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:52.961434  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:52.961473  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.62651ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41216]
I1109 03:38:52.961577  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.961607  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.963611  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.846282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41220]
I1109 03:38:52.963815  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.963939  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:52.963953  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:52.964028  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.964067  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.965687  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.072906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41226]
I1109 03:38:52.966956  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.812304ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41216]
I1109 03:38:52.967532  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15/status: (2.924049ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41220]
I1109 03:38:52.968516  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (6.425941ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41222]
I1109 03:38:52.969024  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.0763ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41220]
I1109 03:38:52.969416  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.970019  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.055792ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41216]
I1109 03:38:52.970594  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:52.970652  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:52.970770  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.970825  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.973728  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (1.835823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41228]
I1109 03:38:52.973744  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.20376ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41222]
I1109 03:38:52.974470  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16/status: (3.350817ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41226]
I1109 03:38:52.974887  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-7.15d56165618f0428: (12.601623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41224]
I1109 03:38:52.977806  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.228196ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41228]
I1109 03:38:52.978492  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (3.091938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41222]
I1109 03:38:52.979569  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.979841  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:52.979894  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:52.980021  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.145982ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41230]
I1109 03:38:52.980028  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.980154  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.981774  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.895983ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41228]
I1109 03:38:52.982444  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17/status: (2.014188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41222]
I1109 03:38:52.982792  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.779677ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41230]
I1109 03:38:52.983691  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.525144ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41234]
I1109 03:38:52.984286  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.261567ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41222]
I1109 03:38:52.984715  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.984925  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:52.984944  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:52.985022  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.985058  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:52.987761  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.614267ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41228]
I1109 03:38:52.988219  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18/status: (2.555098ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41230]
I1109 03:38:52.988624  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (3.212179ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41232]
I1109 03:38:52.991496  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.66536ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41228]
I1109 03:38:52.994055  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.874398ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41228]
I1109 03:38:52.994482  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (4.108752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41230]
I1109 03:38:52.995194  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:52.996011  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:52.996035  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:52.996138  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:52.996191  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.004230  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (6.939332ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41228]
I1109 03:38:53.006439  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (8.387445ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41240]
I1109 03:38:53.006617  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19/status: (7.726337ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41230]
I1109 03:38:53.006980  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (8.336971ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41232]
I1109 03:38:53.009310  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.942963ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41230]
I1109 03:38:53.009707  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.782563ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41228]
I1109 03:38:53.010590  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.010871  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:53.010896  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:53.010977  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.011018  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.013519  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.850254ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41232]
I1109 03:38:53.013936  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.702686ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41244]
I1109 03:38:53.013980  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20/status: (2.704727ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41238]
I1109 03:38:53.014398  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (2.806968ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41242]
I1109 03:38:53.022713  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (7.629676ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41242]
I1109 03:38:53.023091  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (8.499299ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41238]
I1109 03:38:53.023472  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.024635  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:53.024662  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:53.024772  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.024831  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.027942  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-10.15d5616563418b20: (2.234055ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41248]
I1109 03:38:53.029751  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (3.639305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41246]
I1109 03:38:53.030282  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (6.381207ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41238]
I1109 03:38:53.030690  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (4.964487ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41244]
I1109 03:38:53.030943  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.032139  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:53.032201  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:53.032315  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.032370  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.034309  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.299356ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41250]
I1109 03:38:53.035605  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21/status: (2.967892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41246]
I1109 03:38:53.035820  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.230394ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41244]
I1109 03:38:53.036282  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (2.921757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41248]
I1109 03:38:53.038403  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.753193ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41244]
I1109 03:38:53.038908  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (2.808576ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41246]
I1109 03:38:53.039416  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.039661  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:53.039709  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:53.039826  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.039892  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.041607  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (1.5469ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41246]
I1109 03:38:53.042042  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.448314ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41252]
I1109 03:38:53.042594  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22/status: (2.198075ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41250]
I1109 03:38:53.042797  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.723314ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41244]
I1109 03:38:53.045876  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (2.563599ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41252]
I1109 03:38:53.045876  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.454695ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41246]
I1109 03:38:53.046184  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.046366  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:53.046385  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:53.046479  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.046524  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.047959  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (1.001328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41246]
I1109 03:38:53.048870  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.134225ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41256]
I1109 03:38:53.050504  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.333394ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41254]
I1109 03:38:53.050895  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23/status: (3.826428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41252]
I1109 03:38:53.052768  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (951.642µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41246]
I1109 03:38:53.053016  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.053214  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:53.053264  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:53.053371  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.053427  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.053936  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.005898ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41254]
I1109 03:38:53.055913  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.205744ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41254]
I1109 03:38:53.057850  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.912809ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41262]
I1109 03:38:53.058187  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (4.131324ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.058524  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24/status: (4.284274ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41246]
I1109 03:38:53.061292  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.645967ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.061585  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (2.146737ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41254]
I1109 03:38:53.061955  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.062097  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:53.062114  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:53.062201  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.062236  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.063963  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.284812ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.064567  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.156595ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41264]
I1109 03:38:53.065327  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (2.159074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41260]
I1109 03:38:53.066503  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25/status: (3.171262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41254]
I1109 03:38:53.068210  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.215157ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41264]
I1109 03:38:53.068720  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.068917  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:53.068960  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:53.069084  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.069151  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.071221  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26/status: (1.754995ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41264]
I1109 03:38:53.072822  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (909.328µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41264]
I1109 03:38:53.073123  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.073342  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:53.073363  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:53.073436  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.073467  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.074905  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (948.83µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41268]
I1109 03:38:53.075525  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27/status: (1.602496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41264]
I1109 03:38:53.076859  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.051053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41264]
I1109 03:38:53.077267  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.077385  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:53.077400  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:53.077466  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.077547  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.077473  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (7.395992ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.078727  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (8.319028ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41266]
I1109 03:38:53.079390  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (1.19861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41268]
I1109 03:38:53.080540  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.383545ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41266]
I1109 03:38:53.083519  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-13.15d5616564fbff17: (2.066212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41268]
I1109 03:38:53.084845  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (6.310712ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41264]
I1109 03:38:53.085264  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.085556  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:53.085617  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:53.085789  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.085865  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.089431  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.420965ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41270]
I1109 03:38:53.090428  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28/status: (3.442735ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.092107  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.292286ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.092359  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.092554  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:53.092570  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:53.092663  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.092694  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.096457  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29/status: (2.160632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.096859  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (2.864321ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41270]
I1109 03:38:53.099220  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (1.508339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.099464  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.099800  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:53.099817  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:53.099911  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.099943  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.102492  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (2.125768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41270]
I1109 03:38:53.104079  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30/status: (3.420693ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.105605  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.141431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.105779  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.105979  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:53.105996  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:53.106148  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.106215  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.108938  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (22.658015ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41268]
I1109 03:38:53.109800  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (16.261487ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41272]
I1109 03:38:53.110813  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31/status: (4.209347ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41258]
I1109 03:38:53.110845  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (3.951804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41270]
I1109 03:38:53.113350  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.333629ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41272]
I1109 03:38:53.113438  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.2873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41270]
I1109 03:38:53.113747  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.115459  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:53.115476  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:53.115572  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.115610  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.116799  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.846553ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41270]
I1109 03:38:53.118891  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32/status: (3.048938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41268]
I1109 03:38:53.120032  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (3.850364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.120193  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.32665ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41270]
I1109 03:38:53.121280  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (2.066537ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41268]
I1109 03:38:53.121527  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.121682  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:53.121702  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:53.121779  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.121819  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.125239  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (3.02186ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.125671  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33/status: (3.451178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41270]
I1109 03:38:53.126046  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.548637ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41278]
I1109 03:38:53.127348  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.131617ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41270]
I1109 03:38:53.127694  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.127873  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:53.127946  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:53.128088  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.128161  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.129408  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.009936ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.131153  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-17.15d5616566ccbc65: (2.244532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41282]
I1109 03:38:53.131721  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (3.046703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41278]
I1109 03:38:53.132083  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
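The POST /events followed by PATCH /events/ppod-17.15d5616566ccbc65 above is client-go's event correlator at work: the first FailedScheduling occurrence for a pod is created with a POST, and repeats are deduplicated into a PATCH that bumps the existing event's count. A sketch of wiring up such a recorder; the package and function names are illustrative, though the EventSinkImpl wiring is the standard client-go pattern.

package schedcheck

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newSchedulerRecorder builds an event recorder whose correlator
// deduplicates repeated events, producing the POST-then-PATCH traffic
// visible in this log.
func newSchedulerRecorder(cs kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: cs.CoreV1().Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "scheduler"})
}

A caller would then emit, for example, recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "0/1 nodes are available: ..."), and only the first emission per correlation key results in a POST.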
I1109 03:38:53.132321  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:53.132369  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:53.132503  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.132563  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.134018  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (1.165558ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.135596  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.263985ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.137612  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34/status: (4.806949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41282]
I1109 03:38:53.139332  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (1.180252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.139761  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.139975  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:53.139993  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:53.140084  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.140118  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.141536  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (1.051564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.141998  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.142157  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:53.142205  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:53.142303  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.142388  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.143716  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-18.15d56165671793a7: (2.889136ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41286]
I1109 03:38:53.144192  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35/status: (1.55123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.145001  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (4.581863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41284]
I1109 03:38:53.146314  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (1.005162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41286]
I1109 03:38:53.146572  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.146725  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:53.146743  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:53.146827  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.146857  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.146893  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.677064ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.147050  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (2.313497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41288]
I1109 03:38:53.149755  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.864185ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.150209  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36/status: (3.10087ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41284]
I1109 03:38:53.150562  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.318083ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41288]
I1109 03:38:53.152026  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.034898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.152299  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.152500  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:53.152545  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:53.152669  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.152737  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.155621  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (2.220582ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41286]
I1109 03:38:53.155696  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.213592ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41290]
I1109 03:38:53.157133  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37/status: (4.129566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41276]
I1109 03:38:53.158829  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (1.072418ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41290]
I1109 03:38:53.159089  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.159504  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:53.159715  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:53.159852  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.159911  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.161684  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.115847ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41292]
I1109 03:38:53.162606  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38/status: (2.041079ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41286]
I1109 03:38:53.163125  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (2.990252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41290]
I1109 03:38:53.164101  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (1.179241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41286]
I1109 03:38:53.164382  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.164559  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:53.164578  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:53.164668  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.164706  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.166817  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39/status: (1.881039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41292]
I1109 03:38:53.168073  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (2.642102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41294]
I1109 03:38:53.168715  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.027788ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41292]
I1109 03:38:53.168784  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.70564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41290]
I1109 03:38:53.169337  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.169483  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.60469ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41296]
I1109 03:38:53.169554  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:53.169569  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:53.169659  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.169690  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.171554  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40/status: (1.676049ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41292]
I1109 03:38:53.172687  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.378562ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41298]
I1109 03:38:53.173065  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (1.065566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41292]
I1109 03:38:53.173595  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.173777  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:53.173797  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:53.173875  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.173918  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.174001  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (4.119397ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41294]
I1109 03:38:53.175790  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.379562ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41294]
I1109 03:38:53.176331  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (2.172861ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41298]
I1109 03:38:53.177223  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41/status: (2.974653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41292]
I1109 03:38:53.179573  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.138874ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41298]
I1109 03:38:53.179858  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.180014  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:53.180029  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:53.180113  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.180152  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.181568  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (1.10699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41294]
I1109 03:38:53.183308  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.669453ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41300]
I1109 03:38:53.185188  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42/status: (4.586859ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41298]
I1109 03:38:53.187998  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (2.005458ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41300]
I1109 03:38:53.188380  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.188803  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:53.189037  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:53.189588  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.189673  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.191798  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (1.744497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41300]
I1109 03:38:53.193016  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.602846ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41304]
I1109 03:38:53.193410  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43/status: (2.083282ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41294]
I1109 03:38:53.195103  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (1.255427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41304]
I1109 03:38:53.195467  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.195684  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:53.195731  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:53.195846  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.195908  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.197943  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (1.792286ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41304]
I1109 03:38:53.198600  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44/status: (2.116642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41300]
I1109 03:38:53.200090  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.370809ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41306]
I1109 03:38:53.200970  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (2.057291ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41300]
I1109 03:38:53.201484  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.201603  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:53.201616  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:53.201682  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.201716  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.203399  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (969.542µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41304]
I1109 03:38:53.204485  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45/status: (2.036475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41306]
I1109 03:38:53.205987  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.41945ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41308]
I1109 03:38:53.206450  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.369209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41306]
I1109 03:38:53.206733  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.206915  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:53.206943  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:53.207025  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.207068  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.209467  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.797097ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41310]
I1109 03:38:53.210210  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46/status: (2.805808ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41308]
I1109 03:38:53.210775  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (2.588574ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41304]
I1109 03:38:53.212714  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.063307ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41304]
I1109 03:38:53.213011  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.213222  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:53.213278  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:53.213396  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.213458  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.216646  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (2.543362ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41310]
I1109 03:38:53.216971  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.579627ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41312]
I1109 03:38:53.217036  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47/status: (2.611778ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41304]
I1109 03:38:53.218537  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.12462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41304]
I1109 03:38:53.218800  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.219064  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:53.219130  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:53.219330  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.219412  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.221103  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.23975ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41310]
I1109 03:38:53.221114  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.480949ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41312]
I1109 03:38:53.222022  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.222173  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:53.222197  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:53.222312  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.222370  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.222833  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-24.15d561656b2acac8: (2.461157ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41314]
I1109 03:38:53.224084  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (1.458065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41310]
I1109 03:38:53.225217  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48/status: (2.095665ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41314]
I1109 03:38:53.225617  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.30372ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41312]
I1109 03:38:53.227124  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (1.30575ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41314]
I1109 03:38:53.227397  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.227511  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:53.227529  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:53.227594  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.227633  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.229660  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (1.310985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41310]
I1109 03:38:53.229678  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.31401ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41316]
I1109 03:38:53.230633  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49/status: (2.779939ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41312]
I1109 03:38:53.232133  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (1.02955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41316]
I1109 03:38:53.232443  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.232655  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:53.232703  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:53.232827  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.232894  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.234223  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (1.024379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41316]
I1109 03:38:53.234577  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.234750  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:53.234791  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:53.234944  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.235009  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.236940  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.559765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41320]
I1109 03:38:53.237091  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-26.15d561656c1ab32a: (3.657052ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41310]
I1109 03:38:53.237173  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (2.352808ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41316]
I1109 03:38:53.237674  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (2.305137ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41318]
I1109 03:38:53.237936  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.238077  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:53.238092  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:53.238184  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.238211  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.243674  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (4.960513ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41318]
I1109 03:38:53.243985  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.244199  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:53.244276  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.433525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41322]
I1109 03:38:53.244284  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:53.244525  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.244601  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.246162  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-28.15d561656d19c1a2: (8.290205ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41320]
I1109 03:38:53.246477  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.634188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41322]
I1109 03:38:53.246690  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.248346  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (2.249ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41318]
I1109 03:38:53.249287  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:53.249298  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-31.15d561656e4fde50: (2.562173ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41322]
I1109 03:38:53.249305  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:53.249396  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.249451  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.250777  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (1.070645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41320]
I1109 03:38:53.252597  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (2.672442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41318]
I1109 03:38:53.252930  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.253167  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:53.253215  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:53.253390  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.253570  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.253782  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-32.15d561656edfa862: (3.242045ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41324]
I1109 03:38:53.256043  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.95871ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41320]
I1109 03:38:53.256742  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (2.91301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41318]
I1109 03:38:53.257034  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-35.15d5616570783305: (2.596588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41324]
I1109 03:38:53.257209  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.257354  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:53.257387  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:53.257473  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.257506  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.259393  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (1.450398ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41326]
I1109 03:38:53.259481  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (1.833639ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41320]
I1109 03:38:53.259867  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.259969  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-36.15d5616570bc7323: (2.294955ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41318]
I1109 03:38:53.260089  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:53.260135  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:53.260309  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.260388  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.263187  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (2.06731ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41326]
I1109 03:38:53.263493  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.263629  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (2.720218ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.263699  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:53.263726  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:53.263806  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-38.15d561657183a765: (2.930811ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41320]
I1109 03:38:53.263867  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.263950  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.265434  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (1.249982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.265916  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (1.245823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41326]
I1109 03:38:53.266259  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.266451  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:53.266505  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:53.266614  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.266678  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.268624  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.740205ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.269175  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-39.15d5616571ccb307: (3.338088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.269414  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.620982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41326]
I1109 03:38:53.270142  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.270869  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:53.270936  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:53.271033  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.271066  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.272112  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.900781ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.272436  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.197246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.272656  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-40.15d561657218dc54: (2.653479ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41326]
I1109 03:38:53.272672  106211 preemption_test.go:583] Check unschedulable pods still exist and were never scheduled...
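At this point preemption_test.go switches from scheduling churn to verification: the GET requests that follow walk ppod-0 through ppod-49, confirming each low-priority pod still exists and was never bound to a node. A minimal sketch of such a pass, assuming the ppod-%d naming and the pod count inferred from the log; this is not the test's actual helper.

package schedcheck

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkPodsNeverScheduled verifies that each low-priority pod still
// exists and has never been bound (an unscheduled pod has an empty
// Spec.NodeName).
func checkPodsNeverScheduled(cs kubernetes.Interface, ns string, count int) error {
	for i := 0; i < count; i++ {
		name := fmt.Sprintf("ppod-%d", i)
		pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("pod %s should still exist: %v", name, err)
		}
		if pod.Spec.NodeName != "" {
			return fmt.Errorf("pod %s was unexpectedly bound to node %s", name, pod.Spec.NodeName)
		}
	}
	return nil
}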
I1109 03:38:53.272802  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.273140  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:53.273183  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:53.273313  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:53.273380  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:53.274271  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.245874ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.275275  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (3.715058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41332]
I1109 03:38:53.275385  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.81806ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.275899  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:53.276158  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.283463ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41334]
I1109 03:38:53.277991  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (2.177264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41332]
I1109 03:38:53.279641  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.115116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.281404  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (1.432058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.282121  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-41.15d5616572595de8: (2.529337ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.283621  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (1.609373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.286130  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-46.15d561657453364d: (3.294216ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.287355  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (2.973793ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.288960  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (1.155506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.290112  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-47.15d5616574b4a92c: (3.09542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.291788  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.974679ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.293483  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (1.311885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.295360  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (1.512718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.297840  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (2.050999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.301119  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (2.568787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.303020  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (1.370469ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.304741  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (1.154053ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.306444  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (1.314455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.308075  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.121959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.309690  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (1.142746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.311350  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.169605ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.312856  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (1.087175ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.314742  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.393381ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.316562  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.333819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.318098  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (997.205µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.319941  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (1.37773ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.321597  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (1.218719ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.323283  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.284175ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.324978  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.255622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.326724  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (1.209566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.328213  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.124879ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.330055  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.222227ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.332044  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (1.318295ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.333706  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.215692ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.335560  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.334575ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.337382  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.326964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.338947  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.100233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.340455  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (1.040306ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.341917  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (1.117349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.343352  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.037804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.345294  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (1.316379ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.346599  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (947.51µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.348095  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.079476ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.349462  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (972.011µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.351142  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.209183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.352786  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (1.071079ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.354580  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (1.20703ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.356182  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (1.041229ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.357796  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.184952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.359322  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.051974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.360834  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.079369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.362405  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (1.080897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.363943  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (1.077234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.364321  106211 preemption_test.go:598] Cleaning up all pods...
I1109 03:38:53.368025  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:53.368074  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:53.369066  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (4.50111ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.370371  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.414294ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.372594  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:53.372623  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:53.374306  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.340247ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.374393  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (4.861738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.377677  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:53.377704  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:53.379433  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.515021ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.380903  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (6.093853ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.384222  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:53.384312  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:53.386685  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.719548ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.387372  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (5.981853ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.390463  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:53.390494  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:53.392439  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (4.713302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.392841  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.0439ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.395892  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:53.395930  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:53.397965  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.791501ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.399006  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (6.034981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.402354  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:53.402388  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:53.404271  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (4.951253ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.404309  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.581148ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.407493  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:53.407537  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:53.409669  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.794127ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.410192  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (5.402375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.413236  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:53.413375  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:53.414506  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (3.984475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.415014  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.394389ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.417573  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:53.417683  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:53.419561  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.537828ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.420575  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (5.700844ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.424105  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:53.424208  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:53.425392  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (3.809103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.426427  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.858376ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.429267  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:53.429347  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:53.430504  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (4.204213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.431282  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.584821ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.433577  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:53.433613  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:53.435720  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (4.773159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.435936  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.019998ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.439333  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:53.439359  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:53.450453  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (10.811685ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.451957  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (15.803586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.455908  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:53.456006  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:53.457993  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.559172ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.458014  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (5.610239ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.461611  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:53.461649  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:53.463046  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (4.498486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.464162  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.527407ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.466340  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:53.466373  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:53.467912  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (4.128222ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.468703  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.045615ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.470582  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:53.470616  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:53.472415  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.570935ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.473073  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (4.84651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.476323  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:53.476352  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:53.478712  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.026982ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.479208  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (5.645929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.482502  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:53.482533  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:53.484141  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (4.477797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.484411  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.537244ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.487144  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:53.487227  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:53.488908  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.333773ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.488943  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (4.440335ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.492454  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:53.492533  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:53.493576  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (4.149747ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.494509  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.710336ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.496738  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:53.496815  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:53.497448  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (3.583515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.498451  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.320126ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.500362  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:53.500390  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:53.501616  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (3.746089ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.503517  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.764228ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.504982  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:53.505035  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:53.505487  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (3.577238ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.506957  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.572185ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.508324  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:53.508388  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:53.509475  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (3.585589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.510943  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.260063ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.513673  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:53.513703  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:53.515702  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (5.561585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.516041  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.0964ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.523407  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:53.523446  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:53.524693  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (8.660029ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.525170  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.471262ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.527660  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:53.527692  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:53.529066  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (4.030219ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.529577  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.640004ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.532211  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:53.532258  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:53.533767  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (4.235248ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.534104  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.555982ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.537016  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:53.537043  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:53.538834  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (4.517215ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.538864  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.551534ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.542145  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:53.542178  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:53.543083  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (3.805255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.543970  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.505167ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.546309  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:53.546346  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:53.547851  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.263279ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.548605  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (5.11873ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.552827  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:53.552914  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:53.552975  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (4.02562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.554740  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.408677ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.556069  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:53.556157  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:53.557751  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.301396ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.557931  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (4.599178ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.560989  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:53.561026  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:53.562647  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.395456ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.563072  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (4.773123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.566395  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:53.566426  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:53.567965  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.33035ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.569009  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (5.601907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.572763  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:53.572799  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:53.574458  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (4.965088ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.574877  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.80878ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.577438  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:53.577506  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:53.579669  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.869558ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.580321  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (5.491793ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.583536  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:53.583608  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:53.585435  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.527495ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.585513  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (4.757191ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.588717  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:53.588804  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:53.589906  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (3.956611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.590734  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.589176ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.594389  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:53.594480  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:53.595185  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (4.819449ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.596287  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.509136ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.598373  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:53.598402  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:53.600061  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.342042ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.600084  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (4.45373ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.603285  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:53.603318  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:53.604221  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (3.726948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.605215  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.503192ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.607583  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:53.607651  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:53.608826  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (4.016892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.609843  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.41501ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.612088  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:53.612154  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:53.614634  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (5.47624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.614854  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.869287ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.617364  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:53.617877  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:53.617891  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:53.617924  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:53.619090  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:53.619271  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:53.619800  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.615175ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.621671  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:53.622039  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (7.082132ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.625886  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:53.625918  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:53.627883  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (5.428116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.629752  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.58227ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.631974  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:53.632325  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:53.634064  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (4.869508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.634562  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.827653ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.637006  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:53.637073  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:53.638439  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (3.692793ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.641470  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.116117ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.644644  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0: (5.790746ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.645703  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (825.629µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.649894  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (3.784002ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.652766  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.304492ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.655412  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (1.050723ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.658314  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.252ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.661018  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (1.17027ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.664038  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (1.390885ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.666748  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (1.111707ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.669391  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (955.297µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.672103  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.181949ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.674807  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (1.135709ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.678008  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (1.688384ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.680623  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (999.946µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.683571  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (1.378945ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.686213  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (1.040351ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.689109  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (1.25727ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.691943  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (1.271327ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.694766  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.16611ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.697599  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (1.216446ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.700580  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.350526ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.703399  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (1.181351ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.706410  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.210846ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.709223  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.103895ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.712023  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (1.092669ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.714445  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (969.442µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.717206  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (1.056711ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.719943  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.140044ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.722527  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.026474ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.725223  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (1.09191ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.727990  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.133735ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.730493  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.010735ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.733199  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (1.000792ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.735852  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.059886ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.738537  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.197761ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.741329  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.307725ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.744047  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.099405ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.746664  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (920.443µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.749023  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (826.867µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.751574  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (879.71µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.753941  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (877.94µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.756427  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (922.417µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.759195  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.012002ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.761745  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (991.598µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.764237  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (915.544µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.766709  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (892.599µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.769855  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (1.49765ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.777813  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (6.414343ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.795181  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.43084ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.798143  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.267227ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.800691  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.035569ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.803534  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (1.078426ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.805926  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (923.136µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.808353  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0: (888.671µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.810756  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (893.11µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.813474  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.095713ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.816918  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0
I1109 03:38:53.816942  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0
I1109 03:38:53.817068  106211 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0", node "node1"
I1109 03:38:53.817083  106211 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I1109 03:38:53.817131  106211 factory.go:733] Attempting to bind rpod-0 to node1
I1109 03:38:53.819048  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0/binding: (1.661353ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.819260  106211 scheduler.go:572] pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
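(The scheduler lines before that POST show the fast path for rpod-0: volumes are assumed with nothing to bind, the pod is assumed onto node1 in the scheduler cache, and a POST to the pod's binding subresource makes the placement durable. A minimal sketch of that final step, assuming a recent client-go; the release-1.14 Bind method takes no context argument:)

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bindPod issues the POST .../pods/<name>/binding seen in the log, which
// sets spec.nodeName on the server side.
func bindPod(ctx context.Context, cs kubernetes.Interface, ns, pod, node string) error {
	binding := &v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pod},
		Target:     v1.ObjectReference{Kind: "Node", Name: node},
	}
	return cs.CoreV1().Pods(ns).Bind(ctx, binding, metav1.CreateOptions{})
}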
I1109 03:38:53.819535  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.540107ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.821319  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.54373ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:53.822130  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.120675ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.822256  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1
I1109 03:38:53.822275  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1
I1109 03:38:53.822423  106211 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1", node "node1"
I1109 03:38:53.822446  106211 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I1109 03:38:53.822510  106211 factory.go:733] Attempting to bind rpod-1 to node1
I1109 03:38:53.824514  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1/binding: (1.731746ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.824677  106211 scheduler.go:572] pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I1109 03:38:53.826677  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.769467ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:53.924745  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0: (1.906981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.027699  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (2.150227ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.028231  106211 preemption_test.go:561] Creating the preemptor pod...
I1109 03:38:54.030882  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:54.030908  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:54.031017  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.031061  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.034178  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (5.636069ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
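(At this point the preemptor pod has been created, found unschedulable because rpod-0 and rpod-1 fill node1, and had its PodScheduled condition set to False/Unschedulable via the PUT .../pods/preemptor-pod/status call. A minimal sketch of waiting for that condition, assuming a recent client-go; the helper name and timeouts are illustrative:)

package sketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitUnschedulable polls until the scheduler has posted the
// PodScheduled=False/Unschedulable condition seen in the status PUT above.
func waitUnschedulable(cs kubernetes.Interface, ns, name string) error {
	return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, c := range pod.Status.Conditions {
			if c.Type == v1.PodScheduled && c.Status == v1.ConditionFalse && c.Reason == v1.PodReasonUnschedulable {
				return true, nil
			}
		}
		return false, nil
	})
}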
I1109 03:38:54.034448  106211 preemption_test.go:567] Creating additional pods...
I1109 03:38:54.035973  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.896185ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41362]
I1109 03:38:54.036487  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (3.700757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41360]
I1109 03:38:54.036814  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod/status: (3.712056ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41330]
I1109 03:38:54.038410  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.815996ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.041633  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.452437ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41360]
I1109 03:38:54.042102  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.295708ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.042857  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.046056  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod/status: (2.838905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41360]
I1109 03:38:54.046461  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.153155ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.049908  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.621405ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.054740  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.099238ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.055884  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (8.406661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41360]
I1109 03:38:54.056807  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:54.056830  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:54.056952  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.056987  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.057943  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.350916ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.060504  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.156424ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41360]
I1109 03:38:54.060516  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.186219ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.060805  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (2.943064ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41364]
I1109 03:38:54.061320  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0/status: (3.468326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41362]
I1109 03:38:54.063186  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.948979ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.063512  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.297694ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41360]
I1109 03:38:54.064141  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.622226ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41362]
I1109 03:38:54.064412  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
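(Each ppod-N follows the same cycle from here on: attempt to schedule, no fit with "Insufficient cpu, Insufficient memory", condition update, then a preemption pass in which generic_scheduler.go logs node1 as a potential node because evicting lower-priority pods there could make room. Preemption only engages when the pending pod outranks the victims, which is why such tests pair a PriorityClass with requests large enough to force the no-fit result. A minimal sketch of that setup, assuming scheduling.k8s.io/v1 (GA in 1.14) and a recent client-go; the class name, value, image, and request sizes are illustrative, not taken from the test:)

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createPreemptor creates a high PriorityClass and a pod whose requests
// cannot fit on the full node, so the scheduler's preemption pass will
// consider evicting lower-priority pods on its behalf.
func createPreemptor(ctx context.Context, cs kubernetes.Interface, ns string) error {
	pc := &schedulingv1.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: "high-priority"},
		Value:      1000,
	}
	if _, err := cs.SchedulingV1().PriorityClasses().Create(ctx, pc, metav1.CreateOptions{}); err != nil {
		return err
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "preemptor-pod"},
		Spec: v1.PodSpec{
			PriorityClassName: pc.Name,
			Containers: []v1.Container{{
				Name:  "pause",
				Image: "k8s.gcr.io/pause:3.1",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("400m"),
						v1.ResourceMemory: resource.MustParse("400Mi"),
					},
				},
			}},
		},
	}
	_, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	return err
}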
I1109 03:38:54.065373  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:54.065389  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:54.065557  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.065594  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.068457  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1/status: (2.009468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41362]
I1109 03:38:54.069161  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.477438ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.069972  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (5.865552ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.070084  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (1.263081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41362]
I1109 03:38:54.072185  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.072562  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:54.072891  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:54.072861  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (6.786229ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41364]
I1109 03:38:54.073657  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.073765  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.073880  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.813549ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.076403  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.126268ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41362]
I1109 03:38:54.077972  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.326041ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.079005  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2/status: (2.282032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41362]
I1109 03:38:54.079687  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (3.661596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.080473  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.037206ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41328]
I1109 03:38:54.081993  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.071074ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.082476  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.082666  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:54.082729  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:54.082833  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.082871  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.086391  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (5.358959ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41368]
I1109 03:38:54.087456  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3/status: (4.304772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.087981  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.605842ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I1109 03:38:54.089170  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (4.576429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41370]
I1109 03:38:54.089872  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.79891ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41368]
I1109 03:38:54.092471  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.092275ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41370]
I1109 03:38:54.092813  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (2.121566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.093675  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.093909  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:54.093950  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:54.094174  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.095823  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.095689  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.59057ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41370]
I1109 03:38:54.098976  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (2.89834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.100029  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4/status: (3.545345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I1109 03:38:54.100295  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.280909ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41374]
I1109 03:38:54.101097  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.308409ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41370]
I1109 03:38:54.102904  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (2.469413ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I1109 03:38:54.104152  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.104634  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.047257ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41374]
I1109 03:38:54.105119  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:54.105136  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:54.105255  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.105294  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.114604  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.691874ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41380]
I1109 03:38:54.115598  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5/status: (7.460622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.116429  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (7.682956ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41372]
I1109 03:38:54.117562  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (11.37817ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I1109 03:38:54.121488  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.59318ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41380]
I1109 03:38:54.122225  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (3.308048ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.123351  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.123579  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:54.123618  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:54.123856  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.123938  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.125821  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.570448ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.126132  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
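(ppod-0 reappearing here, after first being rejected at 03:38:54.056, is likely the race this test exercises: the DELETE of rpod-1 at 03:38:54.055 frees capacity on node1, pod-deletion events move previously unschedulable pods back into the scheduler's active queue, and retries interleave with the creations of later ppod-N pods. Triggering that requeue is just a pod deletion; a minimal sketch, assuming a recent client-go:)

package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// freeCapacity deletes a running pod; the scheduler observes the deletion
// and requeues previously unschedulable pods, producing the repeated
// "About to try and schedule pod" lines seen above.
func freeCapacity(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	return cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
}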
I1109 03:38:54.126405  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:54.126425  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:54.126535  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.126574  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.127125  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.766641ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41380]
I1109 03:38:54.127488  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (2.797749ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I1109 03:38:54.129119  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (2.0269ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41384]
I1109 03:38:54.129692  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-0.15d56165a6fbe665: (4.912072ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41382]
I1109 03:38:54.130345  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6/status: (3.496382ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.130452  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.855707ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41380]
I1109 03:38:54.132224  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (1.165902ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.132564  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.132720  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:54.132732  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:54.132809  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.132840  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.133590  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.405181ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41384]
I1109 03:38:54.135405  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7/status: (2.350236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.135505  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.400094ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I1109 03:38:54.137652  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.36281ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41386]
I1109 03:38:54.138400  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (2.806967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41384]
I1109 03:38:54.138457  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (2.398823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41378]
I1109 03:38:54.138676  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.643412ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41366]
I1109 03:38:54.138754  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.138919  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:54.138944  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:54.139026  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.139056  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.142351  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.719658ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41390]
I1109 03:38:54.143134  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8/status: (3.843586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41386]
I1109 03:38:54.143220  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.810445ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41384]
I1109 03:38:54.143538  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (3.545805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41388]
I1109 03:38:54.145783  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.806504ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41390]
I1109 03:38:54.146329  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (1.457951ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41394]
I1109 03:38:54.147871  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.148164  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:54.148204  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:54.148322  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.148379  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.149397  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.225273ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41390]
I1109 03:38:54.149811  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (1.165072ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41394]
I1109 03:38:54.150347  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.150531  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:54.150546  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:54.150642  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.150675  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.152463  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (2.060573ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41390]
I1109 03:38:54.152942  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.529251ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41394]
I1109 03:38:54.153970  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9/status: (2.019051ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41396]
I1109 03:38:54.154489  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (3.011731ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41398]
I1109 03:38:54.157461  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-1.15d56165a77f3e10: (3.550932ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41390]
I1109 03:38:54.159857  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (4.978617ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41396]
I1109 03:38:54.160108  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.160547  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.343349ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41398]
I1109 03:38:54.161233  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (7.205387ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41394]
I1109 03:38:54.162417  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:54.162434  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:54.162624  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.162695  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.165217  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (1.976281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41402]
I1109 03:38:54.166076  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.412363ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41396]
I1109 03:38:54.168676  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.144409ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41396]
I1109 03:38:54.168697  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10/status: (5.603428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.169815  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.655457ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41404]
I1109 03:38:54.170561  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (1.430717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.171010  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.171258  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:54.171307  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:54.171283  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.96346ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41402]
I1109 03:38:54.171588  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.171668  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.173831  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.481867ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41404]
I1109 03:38:54.176161  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-2.15d56165a7fbbb8d: (3.755453ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41406]
I1109 03:38:54.176194  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (3.320752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.176441  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.176948  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.3015ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41402]
I1109 03:38:54.177176  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:54.177197  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:54.177309  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.178820  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (1.304938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41404]
I1109 03:38:54.179365  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.973815ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.180041  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.182377  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.900693ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41408]
I1109 03:38:54.183552  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11/status: (2.027715ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41404]
I1109 03:38:54.183708  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.912682ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.185328  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (1.401609ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41404]
I1109 03:38:54.185571  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.185769  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:54.185794  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:54.185900  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.185952  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.187665  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.461426ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.188726  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.083745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41412]
I1109 03:38:54.188742  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (2.579791ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41404]
I1109 03:38:54.189274  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12/status: (2.791338ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41408]
I1109 03:38:54.191016  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.863253ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.193109  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (3.375718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41412]
I1109 03:38:54.193376  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.193992  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:54.194017  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:54.194091  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.562576ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.194110  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.194143  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.196833  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.541771ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.196894  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.588929ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41416]
I1109 03:38:54.197168  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (2.515455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41404]
I1109 03:38:54.198465  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13/status: (3.877571ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41412]
I1109 03:38:54.201482  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.148408ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.201508  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (2.670777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41404]
I1109 03:38:54.201845  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.202107  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:54.202290  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:54.202456  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.202528  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.204753  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (1.289947ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41414]
I1109 03:38:54.207281  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.431547ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41418]
I1109 03:38:54.207281  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.877086ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41414]
I1109 03:38:54.207835  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14/status: (5.028212ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.210216  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.882404ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41418]
I1109 03:38:54.211561  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (1.459839ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.212467  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.212710  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.789486ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41418]
I1109 03:38:54.212720  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:54.212797  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:54.212926  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.212966  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.214717  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.134405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41422]
I1109 03:38:54.215991  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.497327ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41424]
I1109 03:38:54.218183  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.820352ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41420]
I1109 03:38:54.219698  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15/status: (6.499807ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.221608  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.93202ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41424]
I1109 03:38:54.222386  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.635541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.222668  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.223314  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:54.223372  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:54.223515  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.223576  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.223931  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.924837ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41424]
I1109 03:38:54.225815  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16/status: (1.92842ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41422]
I1109 03:38:54.227870  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.660942ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41424]
I1109 03:38:54.229423  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (3.258008ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41422]
I1109 03:38:54.229938  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (6.054288ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.230537  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (5.997662ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41426]
I1109 03:38:54.232371  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.232580  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:54.232595  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:54.232675  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.232710  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.233923  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.939189ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.235008  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.497985ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41430]
I1109 03:38:54.235530  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (2.196553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41428]
I1109 03:38:54.235991  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17/status: (3.058826ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41424]
I1109 03:38:54.238408  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.977961ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.239590  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.944127ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41428]
I1109 03:38:54.239775  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.239886  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:54.239897  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:54.239976  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.240009  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.241853  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.797736ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.243300  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (2.177865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41430]
I1109 03:38:54.243458  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (2.798328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41428]
I1109 03:38:54.244568  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.244796  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:54.244820  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:54.244923  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.244963  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.245236  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-5.15d56165a9dcf792: (3.354799ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41432]
I1109 03:38:54.247624  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.906401ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41432]
I1109 03:38:54.248039  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18/status: (1.814613ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41430]
I1109 03:38:54.248387  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (3.037865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.249584  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (1.156945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41430]
I1109 03:38:54.249838  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.250113  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:54.250136  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:54.250363  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.250423  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.251799  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.048205ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41432]
I1109 03:38:54.252666  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19/status: (1.966745ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.253418  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.263615ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41434]
I1109 03:38:54.254089  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.093741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41400]
I1109 03:38:54.254836  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.255984  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:54.256012  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:54.256150  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.256187  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.258412  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20/status: (1.824333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41434]
I1109 03:38:54.259842  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (3.354611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41432]
I1109 03:38:54.260115  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.203926ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41436]
I1109 03:38:54.261937  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (2.684427ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41434]
I1109 03:38:54.262272  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.262593  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:54.262620  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:54.262719  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.262760  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.264991  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21/status: (1.967341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41436]
I1109 03:38:54.265052  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (1.54208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41432]
I1109 03:38:54.265393  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.617283ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41438]
I1109 03:38:54.267191  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (1.103804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41432]
I1109 03:38:54.267477  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.267785  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:54.267832  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:54.267971  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.268037  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.270066  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.475428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41432]
I1109 03:38:54.270327  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.845305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41436]
I1109 03:38:54.271724  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.271923  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:54.272044  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:54.271940  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-7.15d56165ab8156b2: (2.710406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41440]
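Note the PATCH at 03:38:54.271940: ppod-7 has already failed once, so its FailedScheduling event is deduplicated. The event recorder's correlator folds the repeat into the existing Event object (bumping its count) rather than creating a new one, which is why retried pods produce PATCH /events/<name> while first-time failures produce POST /events. Below is a hedged sketch of emitting such an event through client-go's 1.14-era EventRecorder; the cluster access, namespace, and pod name are assumptions for illustration.

```go
package main

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/record"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// The broadcaster's sink runs events through a correlator: a repeat of
	// the same (object, reason, message) is merged into the existing Event
	// via PATCH instead of stored as a new one via POST -- the pattern seen
	// in the log above.
	b := record.NewBroadcaster()
	b.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: cs.CoreV1().Events("")})
	recorder := b.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "default-scheduler"})

	// Hypothetical namespace/pod name.
	pod, err := cs.CoreV1().Pods("default").Get("ppod-7", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	recorder.Eventf(pod, corev1.EventTypeWarning, "FailedScheduling",
		"0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.")

	// Event delivery is asynchronous; give the sink a moment to flush.
	time.Sleep(time.Second)
}
```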
I1109 03:38:54.272299  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.272446  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.274145  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (1.457881ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41436]
I1109 03:38:54.275373  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.16288ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41442]
I1109 03:38:54.275950  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22/status: (3.083695ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41432]
I1109 03:38:54.278186  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (1.614395ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41442]
I1109 03:38:54.278486  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.278689  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:54.278704  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:54.278796  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.278835  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.281506  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.86161ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41444]
I1109 03:38:54.281550  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23/status: (2.383551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41442]
I1109 03:38:54.283275  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (1.226913ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41442]
I1109 03:38:54.283510  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.283800  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:54.283819  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:54.283939  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.283974  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.285724  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.419885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41444]
I1109 03:38:54.286093  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24/status: (1.744676ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41442]
I1109 03:38:54.286963  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (7.591434ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41436]
I1109 03:38:54.287193  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.380497ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41446]
I1109 03:38:54.288958  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.604066ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41442]
I1109 03:38:54.289324  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.289511  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:54.289544  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:54.289666  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.289698  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.293129  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.817932ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41448]
I1109 03:38:54.293702  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25/status: (3.783957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41446]
I1109 03:38:54.293705  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (3.710095ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41444]
I1109 03:38:54.295915  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.382749ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41446]
I1109 03:38:54.296175  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.296335  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:54.296355  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:54.296428  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.296464  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.298128  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (1.050129ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41448]
I1109 03:38:54.300080  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26/status: (2.701108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41444]
I1109 03:38:54.300334  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.694357ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41450]
I1109 03:38:54.301569  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (1.095424ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41444]
I1109 03:38:54.301989  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.302146  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:54.302187  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:54.302327  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.302410  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.303904  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.263495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41448]
I1109 03:38:54.304336  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27/status: (1.504684ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41450]
I1109 03:38:54.304821  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.73994ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41454]
I1109 03:38:54.306126  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.30544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41450]
I1109 03:38:54.306746  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.306909  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:54.306929  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:54.307056  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.307095  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.309401  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.742454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41448]
I1109 03:38:54.309410  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.480407ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41456]
I1109 03:38:54.311051  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28/status: (3.358862ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41454]
I1109 03:38:54.312845  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.30847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41456]
I1109 03:38:54.313213  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.313534  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:54.313557  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:54.313697  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.313768  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.316113  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (1.606217ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41448]
I1109 03:38:54.316143  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.483475ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.316527  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29/status: (2.4758ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41456]
I1109 03:38:54.318058  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (1.157814ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41448]
I1109 03:38:54.318351  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.318521  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:54.318540  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:54.318633  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.318684  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.320318  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.405904ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.321668  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30/status: (2.770664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41448]
I1109 03:38:54.322282  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.035187ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41460]
I1109 03:38:54.323571  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.537718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41448]
I1109 03:38:54.323887  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.324160  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:54.324201  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:54.324401  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.324495  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.326759  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.914373ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.327202  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (2.44263ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41460]
I1109 03:38:54.328042  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31/status: (2.256828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41462]
I1109 03:38:54.330104  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.521175ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41460]
I1109 03:38:54.330452  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.330698  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:54.330712  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:54.330823  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.330865  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.332346  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.118557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41460]
I1109 03:38:54.332722  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.513029ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.333170  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32/status: (1.596419ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41464]
I1109 03:38:54.335554  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.204041ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.335776  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.335894  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:54.335944  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:54.336120  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.336201  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.337558  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.10255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.340371  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.149148ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41460]
I1109 03:38:54.340438  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33/status: (2.065391ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.341904  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.074218ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.342185  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.342598  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:54.342656  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:54.342790  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.342847  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.343528  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.119211ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.345132  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (1.942997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41460]
I1109 03:38:54.346887  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.708296ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41468]
I1109 03:38:54.347677  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34/status: (3.256911ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41458]
I1109 03:38:54.350157  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (2.03819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41460]
I1109 03:38:54.350421  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
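The GET .../pods/preemptor-pod at 03:38:54.343528 (and again later in the log) is the test polling whether the preemptor has been bound to a node while the ppod-N victims keep failing. A minimal sketch of that kind of wait loop, using apimachinery's wait.Poll as the integration tests commonly do, is below; the kubeconfig, namespace, interval, and timeout are illustrative assumptions, not the test's actual values.

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	ns := "default" // hypothetical; the test uses a generated preemption-race... namespace

	// Poll until the preemptor pod has been bound or we time out; each probe
	// is a GET like the .../pods/preemptor-pod requests in the log.
	err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		p, err := cs.CoreV1().Pods(ns).Get("preemptor-pod", metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return p.Spec.NodeName != "", nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("preemptor-pod scheduled")
}
```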
I1109 03:38:54.350735  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:54.350770  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:54.350867  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.350915  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.355046  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.242986ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41470]
I1109 03:38:54.356377  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (4.784342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41466]
I1109 03:38:54.357013  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35/status: (5.111681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41460]
I1109 03:38:54.358818  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (1.308969ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41466]
I1109 03:38:54.359087  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.359301  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:54.359345  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:54.359456  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.359538  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.361926  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (1.507411ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41470]
I1109 03:38:54.362266  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.363222  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:54.363300  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:54.363465  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.363517  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.363524  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-12.15d56165aeaba25a: (2.457152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41474]
I1109 03:38:54.365592  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.23009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41474]
I1109 03:38:54.367838  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.902911ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41474]
I1109 03:38:54.369795  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36/status: (5.95581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41470]
I1109 03:38:54.370266  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (10.382483ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41466]
I1109 03:38:54.371935  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.097701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41474]
I1109 03:38:54.372289  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.372577  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:54.372631  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:54.372742  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.372804  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.375062  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (1.628259ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41476]
I1109 03:38:54.376206  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.527416ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41480]
I1109 03:38:54.378402  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37/status: (5.345458ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41474]
I1109 03:38:54.381085  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (2.259171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41480]
I1109 03:38:54.381359  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.381660  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:54.381707  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:54.381850  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.381925  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.383519  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (1.326541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41480]
I1109 03:38:54.384788  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.178405ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41482]
I1109 03:38:54.386614  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38/status: (2.702538ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41476]
I1109 03:38:54.389519  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (1.397377ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41482]
I1109 03:38:54.391100  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.391318  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:54.391388  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:54.391499  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.391566  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.394221  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.679879ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.394850  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39/status: (2.420199ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41482]
I1109 03:38:54.394850  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (2.474796ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41480]
I1109 03:38:54.396401  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.22564ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41482]
I1109 03:38:54.396686  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.396883  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:54.396908  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:54.397022  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.397100  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.399541  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (1.123593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.399772  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40/status: (2.249234ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41482]
I1109 03:38:54.400883  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.662963ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41488]
I1109 03:38:54.401954  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (1.141957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41482]
I1109 03:38:54.402256  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.402460  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:54.402476  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:54.402554  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.402623  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.405011  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.672404ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.405011  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41/status: (2.163398ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41488]
I1109 03:38:54.405889  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.553375ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.406851  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.187675ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41488]
I1109 03:38:54.407158  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.407360  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:54.407400  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:54.407490  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.407554  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.410927  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42/status: (2.276154ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.411040  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (2.7571ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.412812  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (1.516153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.413064  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.181974ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41492]
I1109 03:38:54.413080  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.413405  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:54.413428  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:54.413515  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.413560  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.415687  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (1.773876ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.416691  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43/status: (2.737717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.417816  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.768221ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.419457  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (1.379067ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.419752  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.419994  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:54.420010  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:54.420127  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.420167  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.421790  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (1.285917ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.423046  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.593109ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41494]
I1109 03:38:54.423105  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44/status: (2.435781ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.424865  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (1.436673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.425102  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.425281  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:54.425302  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:54.425412  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.425448  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.427862  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45/status: (2.156552ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.430095  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.229832ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.430190  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.792123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.430493  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.430670  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:54.430689  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:54.430812  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.430849  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.433254  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (1.327059ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.433471  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.433576  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (1.628523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.435444  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.296475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41496]
I1109 03:38:54.435776  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:54.435803  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:54.435920  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.435956  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.436960  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-16.15d56165b0e9d7d6: (3.292394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.438053  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.420816ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.439413  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.935602ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.442174  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46/status: (2.295323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41496]
I1109 03:38:54.444196  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.406135ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.444484  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.444657  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:54.444678  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:54.444850  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.444910  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.448201  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47/status: (3.042963ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.448612  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.761532ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.448988  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (2.68365ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41498]
I1109 03:38:54.450463  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.472943ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41490]
I1109 03:38:54.451775  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.451926  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.302461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41486]
I1109 03:38:54.452084  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:54.452104  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:54.452218  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.452276  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.458267  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.177334ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41502]
I1109 03:38:54.458684  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48/status: (5.915204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.458823  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (6.236785ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41498]
I1109 03:38:54.460496  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (1.389767ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.460800  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.460955  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:54.460965  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:54.461044  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.461092  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.478978  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49/status: (2.401648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.479167  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.602984ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41504]
I1109 03:38:54.480455  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (1.07104ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.480702  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.481141  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:54.481164  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:54.481285  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.481327  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.485749  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (2.443976ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.486141  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (3.118889ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41504]
I1109 03:38:54.486587  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.487742  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-18.15d56165b23036f0: (5.155888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41506]
I1109 03:38:54.488154  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:54.488176  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (11.683326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41502]
I1109 03:38:54.488185  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:54.488344  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.488417  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.491047  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.525864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.491488  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (2.371264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41504]
I1109 03:38:54.491812  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.491969  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:54.491990  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:54.492115  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.492157  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.540937  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-20.15d56165b2db81dd: (48.954837ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41504]
I1109 03:38:54.543115  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (50.373123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41510]
I1109 03:38:54.543894  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (50.915472ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.544960  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.545119  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-21.15d56165b33fc6ea: (3.297792ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41504]
I1109 03:38:54.545173  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:54.545190  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:54.545320  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.545358  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.547472  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (1.841406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.548344  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.548512  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:54.548524  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:54.548617  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.548654  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.548689  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.966544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41510]
I1109 03:38:54.550110  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-23.15d56165b4350b37: (3.988255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41514]
I1109 03:38:54.550141  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.340443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.550396  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.550798  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:54.550816  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:54.550901  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.550929  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.570201  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (18.65499ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.570777  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (21.532563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41510]
I1109 03:38:54.580078  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (10.26593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41518]
I1109 03:38:54.580639  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (10.364934ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41516]
I1109 03:38:54.580932  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.581127  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:54.581143  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:54.581470  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.581510  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.595406  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (12.885518ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41510]
I1109 03:38:54.596315  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (14.347492ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.596610  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.596804  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:54.596816  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:54.596914  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.596950  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.598937  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.684904ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.599215  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.599487  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:54.599504  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:54.599591  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.599622  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.602209  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (1.505394ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.602752  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (2.44937ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41522]
I1109 03:38:54.603305  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.603448  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:54.603461  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:54.603538  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:54.603567  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:54.605394  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (1.522501ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41522]
I1109 03:38:54.606151  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (1.976323ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.606373  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:54.607766  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.145566ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41510]
I1109 03:38:54.608014  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-41.15d56165bb955b38: (36.348005ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41500]
I1109 03:38:54.612854  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-42.15d56165bbe12201: (2.64807ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.617280  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-45.15d56165bcf235ee: (3.584446ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.617460  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:54.618179  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:54.619351  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:54.619396  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:54.621232  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-47.15d56165be1b05e1: (2.94194ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.622234  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:54.624686  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-48.15d56165be8b907f: (2.828473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.627515  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-49.15d56165bf11cee4: (2.277893ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.646644  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.865058ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.753585  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (8.515101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.846693  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.911725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:54.946879  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.135823ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.046551  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.786462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.151503  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.515043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.257908  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.894974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.416515  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (11.017037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.446961  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.17077ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.514965  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:55.515003  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:55.515184  106211 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod", node "node1"
I1109 03:38:55.515214  106211 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I1109 03:38:55.515307  106211 cache.go:643] Couldn't expire cache for pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod. Binding is still in progress.
I1109 03:38:55.515358  106211 factory.go:733] Attempting to bind preemptor-pod to node1
I1109 03:38:55.516578  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:55.516611  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:55.516727  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.516768  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.519475  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod/binding: (3.765761ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.519664  106211 scheduler.go:572] pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I1109 03:38:55.522046  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (3.718066ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41522]
I1109 03:38:55.522351  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.522797  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (2.836011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41806]
I1109 03:38:55.522982  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:55.522998  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:55.523114  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.523150  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.523412  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.524728  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (1.254834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41522]
I1109 03:38:55.524957  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.525538  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (2.091148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41806]
I1109 03:38:55.525819  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.525955  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:55.525967  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:55.526047  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.526089  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.527625  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.309688ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41522]
I1109 03:38:55.528064  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.321958ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.528440  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.528748  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.528826  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:55.528848  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:55.528952  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.529004  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.531602  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-3.15d56165a886d3e2: (13.69121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41810]
I1109 03:38:55.533417  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.414431ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41810]
I1109 03:38:55.535995  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-4.15d56165a94c4e54: (2.018341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41810]
I1109 03:38:55.537064  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (7.430903ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41522]
I1109 03:38:55.537419  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (8.200464ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.537835  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.538428  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:55.538444  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:55.538523  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.538551  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.539926  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.542787  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (3.683598ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41822]
I1109 03:38:55.543185  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (3.735587ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41522]
I1109 03:38:55.543458  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.543731  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.544046  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-0.15d56165a6fbe665: (4.015455ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41810]
I1109 03:38:55.545171  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:55.545191  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:55.545295  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.545342  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.547698  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (2.162487ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.548401  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (1.970165ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.550461  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.550574  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.550151  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-6.15d56165ab21b9ef: (5.371705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41522]
I1109 03:38:55.554050  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-8.15d56165abe03a0b: (2.673831ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.555515  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:55.555530  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:55.555643  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.555680  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.559346  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (2.926181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41828]
I1109 03:38:55.559820  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (4.641701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.560345  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (3.629541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41826]
I1109 03:38:55.560645  106211 preemption_test.go:583] Check unschedulable pods still exist and were never scheduled...
I1109 03:38:55.560932  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.561478  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.565968  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:55.566012  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:55.566129  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.566186  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.568732  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-1.15d56165a77f3e10: (14.077446ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.572072  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-9.15d56165ac9181a6: (2.520067ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.579859  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-10.15d56165ad48dd12: (2.53296ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.581051  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (20.05084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41508]
I1109 03:38:55.581720  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (14.896586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41828]
I1109 03:38:55.582140  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (15.766031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41826]
I1109 03:38:55.582559  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.582932  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.584007  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:55.584030  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:55.584188  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.584234  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.588238  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (4.894624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41826]
I1109 03:38:55.591240  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-2.15d56165a7fbbb8d: (5.142644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41832]
I1109 03:38:55.592022  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (3.202361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41826]
I1109 03:38:55.592442  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (5.508385ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.592022  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (5.322556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41830]
I1109 03:38:55.592721  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.592735  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.592888  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:55.592901  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:55.593015  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.593055  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.595455  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (1.496293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.596228  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (2.743225ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41826]
I1109 03:38:55.596412  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.596583  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:55.596618  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:55.598301  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (1.740588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41826]
I1109 03:38:55.598667  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.598745  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.599045  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-11.15d56165ae2880a8: (3.690934ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41832]
I1109 03:38:55.602929  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (3.574512ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.603328  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (3.472073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41832]
I1109 03:38:55.603328  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (2.576423ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41826]
I1109 03:38:55.602929  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (8.524986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41830]
I1109 03:38:55.603537  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.603630  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.603708  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:55.603722  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:55.603808  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.603849  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.604286  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.605925  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (2.18485ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.606705  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (1.919603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.607487  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.607708  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:55.607739  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:55.607832  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.607873  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.608558  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (3.340528ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41832]
I1109 03:38:55.608867  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.609588  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-13.15d56165af28c206: (9.654065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41836]
I1109 03:38:55.610114  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.701261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.612266  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.387022ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.612511  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.612660  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:55.612679  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:55.612747  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.612793  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.614719  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.348167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.615134  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (2.054778ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41832]
I1109 03:38:55.615912  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.616290  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.616324  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:55.616335  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:55.616440  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.616471  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.616954  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.617569  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (11.190841ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.618051  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:55.618864  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-14.15d56165afa8abd2: (3.547368ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41836]
I1109 03:38:55.618902  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (1.881148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41832]
I1109 03:38:55.619204  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.619700  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:55.619722  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:55.619764  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:55.619773  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:55.619836  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:55.619890  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.619921  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.621734  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.325627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.622316  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (1.563771ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41842]
I1109 03:38:55.622811  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.623088  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (2.647811ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41844]
I1109 03:38:55.623174  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:55.623185  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:55.623305  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.623345  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.624144  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.624359  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (7.551541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.625510  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:55.626459  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (2.909335ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.626866  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (3.224754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41842]
I1109 03:38:55.627090  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.627839  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.628904  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:55.628932  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:55.629021  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.629076  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.630625  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (2.651748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.631013  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (3.874896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41844]
I1109 03:38:55.631293  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.631638  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-15.15d56165b047fb61: (7.155795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.631935  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (2.505359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41846]
I1109 03:38:55.632381  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (2.655251ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.632891  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.633230  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.633510  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:55.633526  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:55.633612  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.633641  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.634639  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (2.149496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.635754  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.782896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.636180  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (2.298192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41844]
I1109 03:38:55.636287  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.636590  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.636630  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:55.636645  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:55.636736  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.636784  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.637676  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-17.15d56165b1753dec: (5.003286ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.638187  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (2.429982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.639175  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.353153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.639450  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.640090  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.952287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41844]
I1109 03:38:55.640488  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.640630  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:55.640675  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:55.640800  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.640848  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.643615  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-5.15d56165a9dcf792: (2.97203ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.643693  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (2.584035ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41844]
I1109 03:38:55.643693  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (2.574953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.644041  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.644159  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.644296  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:55.644308  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:55.644451  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.644497  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.644506  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (5.912952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.648790  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.396567ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.649037  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.649775  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (2.126897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41848]
I1109 03:38:55.650453  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (5.773636ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.651491  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.278837ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.652828  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (998.994µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.653380  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.653535  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:55.653550  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:55.654473  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.654521  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.654288  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.119014ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.657047  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.244151ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41850]
I1109 03:38:55.657495  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (2.833738ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.657783  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.658681  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (2.569641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.660040  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (996.156µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.661561  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.06456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41838]
I1109 03:38:55.661831  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.662030  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:55.662051  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:55.662134  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.662170  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.664388  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (1.753812ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.665404  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-19.15d56165b2837dc4: (19.84742ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.667061  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (3.871011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.667397  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.668457  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (2.173006ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.668821  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (6.231646ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41850]
I1109 03:38:55.668978  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-7.15d56165ab8156b2: (2.800705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41824]
I1109 03:38:55.669165  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.672132  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-22.15d56165b3d38d7c: (2.532252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41850]
I1109 03:38:55.674833  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-24.15d56165b48379b3: (2.123911ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41850]
I1109 03:38:55.675339  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:55.675362  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:55.675464  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.675504  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.677042  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.344557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.677281  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.051468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41854]
I1109 03:38:55.677313  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.677582  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.677799  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-25.15d56165b4dade80: (2.449093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41850]
I1109 03:38:55.678039  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:55.678064  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:55.678179  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.678219  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.681308  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (2.533999ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41854]
I1109 03:38:55.681729  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (3.081628ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.683131  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-26.15d56165b5421350: (4.030262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41856]
I1109 03:38:55.683208  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (3.055568ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41840]
I1109 03:38:55.683423  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.683463  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.684730  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:55.684758  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:55.684882  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.684934  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.685311  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.436862ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41854]
I1109 03:38:55.687747  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (2.099735ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.688033  106211 backoff_utils.go:79] Backing off 2s
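
The recurring "Backing off 2s" lines from backoff_utils.go:79 reflect the per-pod backoff the scheduler applies between retries of an unschedulable pod. A hedged sketch of a doubling backoff clamped at a maximum follows; the constants and names here are assumptions for illustration, not the actual backoff_utils.go code.

// Illustrative sketch of a doubling per-pod backoff in the spirit of the
// "backoff_utils.go:79] Backing off 2s" lines. Constants are assumptions.
package main

import (
	"fmt"
	"time"
)

const (
	initialBackoff = 1 * time.Second
	maxBackoff     = 60 * time.Second
)

// nextBackoff doubles the previous duration, clamping at maxBackoff.
func nextBackoff(prev time.Duration) time.Duration {
	if prev <= 0 {
		return initialBackoff
	}
	next := prev * 2
	if next > maxBackoff {
		next = maxBackoff
	}
	return next
}

func main() {
	d := time.Duration(0)
	for i := 0; i < 4; i++ {
		d = nextBackoff(d)
		fmt.Printf("Backing off %v\n", d) // 1s, 2s, 4s, 8s
	}
}
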
I1109 03:38:55.688394  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (2.427213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41858]
I1109 03:38:55.689960  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (1.187375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.691501  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.119541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.692871  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.023967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.694392  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (1.160195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.696121  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.325696ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.697617  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.036603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.697909  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (12.075044ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41854]
I1109 03:38:55.698205  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.698957  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:55.698984  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:55.699085  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.699137  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.699232  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.130942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.700955  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.320872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.701397  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.941094ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41854]
I1109 03:38:55.702204  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.702921  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:55.702941  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:55.703041  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.703080  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.704707  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-27.15d56165b59cc818: (6.225475ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.711715  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (6.726898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41866]
I1109 03:38:55.712156  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (2.460062ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41854]
I1109 03:38:55.711724  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (9.124118ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.712722  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.712909  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (13.142925ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41862]
I1109 03:38:55.713336  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.713498  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:55.713512  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:55.713615  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.713667  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.714980  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-28.15d56165b5e44a94: (4.215296ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.715545  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.717879  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (3.550171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41854]
I1109 03:38:55.718146  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.718789  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (3.191884ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41860]
I1109 03:38:55.720226  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (5.132843ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41866]
I1109 03:38:55.721696  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.721312  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.847345ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41854]
I1109 03:38:55.724039  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:55.724069  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:55.724188  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.724234  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.726536  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.370425ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41868]
I1109 03:38:55.726932  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (2.518235ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41862]
I1109 03:38:55.727171  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
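
Each "Node node1 is a potential node for preemption" line (generic_scheduler.go:1118) means the coarse first pass of preemption found that evicting lower-priority pods could make the pending pod fit on that node. An illustrative sketch of that check, again with simplified stand-in types rather than the real scheduler internals:

// Hedged sketch of the "potential node for preemption" idea: a node
// qualifies if removing every pod of strictly lower priority than the
// pending pod would free enough resources. Simplified stand-in types.
package main

import "fmt"

type PodInfo struct {
	Name     string
	Priority int32
	MilliCPU int64
	Memory   int64
}

type Node struct {
	Name               string
	AllocCPU, AllocMem int64
	Pods               []PodInfo
}

// potentialForPreemption reports whether node could host pending after
// removing all strictly lower-priority pods (a coarse first pass; the
// real scheduler then picks a minimal victim set).
func potentialForPreemption(node Node, pending PodInfo) bool {
	var cpu, mem int64
	for _, p := range node.Pods {
		if p.Priority >= pending.Priority { // higher/equal priority pods stay
			cpu += p.MilliCPU
			mem += p.Memory
		}
	}
	return cpu+pending.MilliCPU <= node.AllocCPU &&
		mem+pending.Memory <= node.AllocMem
}

func main() {
	node := Node{Name: "node1", AllocCPU: 1000, AllocMem: 1 << 30,
		Pods: []PodInfo{{Name: "victim", Priority: 0, MilliCPU: 900, Memory: 900 << 20}}}
	pending := PodInfo{Name: "ppod-29", Priority: 100, MilliCPU: 200, Memory: 100 << 20}
	if potentialForPreemption(node, pending) {
		fmt.Printf("Node %s is a potential node for preemption.\n", node.Name)
	}
}
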
I1109 03:38:55.727451  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.727566  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:55.727590  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:55.727723  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.727762  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.729828  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (2.070132ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41866]
I1109 03:38:55.729990  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (1.887987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41868]
I1109 03:38:55.730439  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (2.008292ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41862]
I1109 03:38:55.730668  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.730744  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.731049  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:55.731069  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:55.731152  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.731192  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.732115  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (1.839648ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41866]
I1109 03:38:55.736121  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (4.335824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41868]
I1109 03:38:55.736168  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (4.816911ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41862]
I1109 03:38:55.736496  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (3.967465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41866]
I1109 03:38:55.736530  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.736698  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.738262  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (1.189975ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41868]
I1109 03:38:55.738570  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:55.738593  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:55.738681  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.738731  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.740039  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (1.156554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41862]
I1109 03:38:55.741679  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (1.05208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41870]
I1109 03:38:55.742003  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.743223  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.743264  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.058034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41868]
I1109 03:38:55.745039  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (1.413741ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41862]
I1109 03:38:55.747292  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-29.15d56165b649a6e8: (30.820304ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.748532  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:55.748556  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:55.748736  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.748778  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.751217  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.612562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41870]
I1109 03:38:55.751846  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (2.570579ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.752329  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.752554  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.752777  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:55.752807  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:55.752912  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.752960  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.755075  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (1.502061ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41870]
I1109 03:38:55.755536  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (2.25809ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.755816  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.755973  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.756144  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:55.756173  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:55.756275  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.756313  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.758573  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (1.647619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41870]
I1109 03:38:55.758830  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.759004  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (2.387435ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.759167  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:55.759186  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:55.759548  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.759574  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.759617  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.761128  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (1.134131ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.761395  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.761528  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (1.134964ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41870]
I1109 03:38:55.761698  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:55.761728  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:55.761778  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.761855  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.761897  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.763810  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (1.72864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.764114  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.764313  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (15.794456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41862]
I1109 03:38:55.766773  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (4.445642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41870]
I1109 03:38:55.766994  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.767893  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (3.231668ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41862]
I1109 03:38:55.768710  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:55.768738  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:55.768847  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.768890  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.771743  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-30.15d56165b6951d33: (23.893777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.774942  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (6.626811ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41870]
I1109 03:38:55.776623  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (6.943426ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.777731  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.777877  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:55.777894  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:55.777970  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.778010  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.779667  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (10.253509ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41874]
I1109 03:38:55.779968  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.783075  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-31.15d56165b6ed89b9: (10.549185ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.786513  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-32.15d56165b74efa62: (2.786788ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.789948  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-33.15d56165b7a03dcb: (2.803309ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.793516  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-34.15d56165b805ce1f: (2.724864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.795407  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (16.728617ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.795819  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.796199  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (19.014596ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41870]
I1109 03:38:55.798289  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.544016ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.800630  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (1.246284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.802124  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (1.128521ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.802372  106211 preemption_test.go:598] Cleaning up all pods...
I1109 03:38:55.807239  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (4.710494ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.811936  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (4.15797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.817369  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (5.075205ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.818726  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-35.15d56165b880e7d1: (24.522299ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.822459  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (4.801716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.827101  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (4.333341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.832780  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (5.348623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.836198  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (41.523987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.840564  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.842173  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:55.842201  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:55.842360  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:55.842436  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:55.843484  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (10.363752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.844063  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.392011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.844284  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-36.15d56165b94136ab: (13.070435ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.844310  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:55.845063  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (2.209334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41874]
I1109 03:38:55.846212  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:55.848655  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-12.15d56165aeaba25a: (2.273245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.851633  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-37.15d56165b9cedf4d: (2.293744ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.855866  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:55.855907  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:55.857137  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (13.281813ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.859943  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:55.859971  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:55.861865  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (4.346765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.869584  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:55.869619  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:55.871680  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (4.690395ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.871979  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-38.15d56165ba5a100e: (18.371587ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.876309  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:55.876562  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:55.879461  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-39.15d56165baed22c6: (3.842418ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.880229  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (8.074401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.885325  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-40.15d56165bb41586e: (4.268661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.885827  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:55.885859  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:55.891070  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (9.30734ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.892819  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-43.15d56165bc3cce2c: (6.269028ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41852]
I1109 03:38:55.897749  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-44.15d56165bca19ad6: (4.274332ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.898588  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:55.898617  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:55.901232  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (6.87763ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.905239  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:55.905291  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:55.905640  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-16.15d56165b0e9d7d6: (6.700602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.908610  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-46.15d56165bd9282ad: (2.251477ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.911667  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-18.15d56165b23036f0: (2.453899ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.915036  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (13.218718ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.916894  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-20.15d56165b2db81dd: (4.100065ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.918698  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.323264ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.921049  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.010615ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.921422  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:55.921449  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:55.923530  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (8.158882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.923942  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.146845ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.926663  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.259784ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.927545  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:55.927574  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:55.930238  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (6.221022ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.932953  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.063046ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.933347  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:55.933389  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:55.935940  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.462045ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.938969  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (8.410913ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.939935  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.766412ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.945771  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.684854ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.955632  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (9.28058ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.958847  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:55.958910  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:55.959953  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.201401ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.960973  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (12.295186ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.964185  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.359565ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.965238  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:55.965289  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:55.974772  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (13.464881ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.979719  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (13.884948ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.981933  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:55.982025  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:55.986176  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.714737ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:55.986845  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (10.92988ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:55.991017  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:55.991053  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:55.996708  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (9.341942ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.000472  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:56.000534  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:56.002763  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (5.509676ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.006302  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:56.006338  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:56.008339  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (4.846595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.029328  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:56.029368  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:56.030916  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (22.09991ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.032445  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (41.025026ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.034876  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.442227ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.036632  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:56.036667  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:56.037280  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.971015ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.038280  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (6.934667ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.088817  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (24.600772ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.118744  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:56.118797  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:56.124326  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (85.519241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.131691  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (6.799663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.133285  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:56.133368  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:56.144322  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (12.089653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.150390  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (5.489245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.162713  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (11.854161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.175566  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (12.198726ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.185877  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (9.141249ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.186877  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (95.59186ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.189914  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.289308ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.199844  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.61044ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.288296  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:56.289877  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:56.291637  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (13.96327ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.300239  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:56.300361  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:56.301130  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (10.284689ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.302552  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (10.408273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.312914  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (9.824469ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.324879  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:56.324959  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:56.329345  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (10.051202ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.332672  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:56.332698  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:56.333054  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.971631ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.335654  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.105542ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.336492  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (6.699556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.339479  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:56.339508  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:56.341550  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.826637ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.343161  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (6.338794ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.348293  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:56.348330  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:56.350325  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (6.483032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.352171  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.55077ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.354360  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:56.354398  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:56.356747  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.114771ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.357831  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (7.188409ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.373419  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:56.373487  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:56.376317  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.452391ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.377009  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (18.903708ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.382629  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:56.382656  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:56.385380  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.438219ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.387886  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (10.30447ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.398503  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:56.398596  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:56.403426  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.826273ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.401283  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (13.062638ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.407432  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:56.407505  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:56.409576  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.719464ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.412677  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (8.635031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.416650  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:56.417304  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:56.419705  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (6.645915ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.420496  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.514573ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.425170  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:56.425260  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:56.427459  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.898735ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.430566  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (9.850428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.434858  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:56.434941  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:56.439458  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.154654ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.441971  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (10.669682ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.447936  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:56.447977  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:56.449406  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (6.743261ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.453533  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:56.453563  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:56.453890  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.866966ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.459669  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.364034ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.461976  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (12.215498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.466720  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:56.466760  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:56.501089  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (38.537342ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.501690  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (34.635144ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.505352  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:56.505389  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:56.513588  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (11.163833ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.513858  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (8.194942ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
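
The run of scheduler.go:449 "Skip schedule deleting pod" lines above shows the scheduler draining its queue while the test deletes ppod-0 through ppod-49: each pod still pops out of the scheduling queue, but the scheduler bails out as soon as it sees a deletion timestamp rather than racing the in-flight DELETE. A minimal Go sketch of that guard, assuming a pod freshly popped from the queue (the helper name skipIfDeleting is illustrative, not the actual scheduler function):

package sketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// skipIfDeleting mirrors the guard behind "Skip schedule deleting pod":
// a pod whose DeletionTimestamp is set is already being torn down, so
// scheduling it would only race with the in-flight DELETE. Illustrative
// sketch, not the verbatim scheduler source.
func skipIfDeleting(pod *v1.Pod) bool {
	if pod.DeletionTimestamp != nil {
		fmt.Printf("Skip schedule deleting pod: %s/%s\n", pod.Namespace, pod.Name)
		return true
	}
	return false
}
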
I1109 03:38:56.521616  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0: (7.329486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.526408  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (4.275771ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.532630  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (4.936415ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.536938  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (2.305099ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.540201  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (1.224124ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.543138  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.340212ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.545941  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (1.254924ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.548989  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (1.098862ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.551889  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (1.150453ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.554791  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (1.197648ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.557304  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.008093ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.560530  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (1.547835ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.564881  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (2.198333ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.568369  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (1.213221ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.571192  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (1.058305ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.574177  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (1.402282ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.576856  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (1.084727ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.580237  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (1.701649ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.583640  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.804512ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.586977  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (1.380204ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.589765  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.239373ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.605472  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (13.40905ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.608538  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.229574ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.612435  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (2.281567ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.615604  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (1.639697ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.618165  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (872.359µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.618220  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:56.619988  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:56.620201  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:56.620230  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:56.621122  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (1.327033ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.623988  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.108405ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.625663  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:56.626706  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (916.161µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.629422  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (1.137164ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.632095  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (884.082µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.635520  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (874.453µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.637939  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (957.505µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.640581  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (729.77µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.643468  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.176455ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.646375  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.305424ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.649315  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.341721ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.651758  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (921.498µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.654044  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (825.203µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.656755  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.000993ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.659004  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (808.311µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.661198  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (764.597µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.663837  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.065796ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.666835  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (1.163495ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.669883  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.207044ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.672675  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (1.052253ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.683366  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (3.313565ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.686447  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (1.363506ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.689221  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.097558ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.691948  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.031211ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.694526  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (921.27µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.697081  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (935.181µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.700145  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (1.107173ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.702902  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0: (1.279818ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.708033  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (2.388362ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.711419  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.632373ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
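
The block of GET ... 404 responses above is the test harness confirming that every pod from the previous round (ppod-0 through ppod-49, rpod-0, rpod-1, preemptor-pod) is fully gone before recreating them. A plausible sketch of such a wait loop, assuming the pre-context client-go signatures that match this release-1.14 run (the helper name waitForPodGone and the poll interval are ours):

package sketch

import (
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodGone polls until a GET for the pod returns 404, the pattern
// the lines above trace for each pod in the namespace. A nil Get error
// means the pod still exists, so polling continues; any other error
// aborts the wait.
func waitForPodGone(cs kubernetes.Interface, ns, name string) error {
	return wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		_, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // pod is fully deleted
		}
		return false, err
	})
}
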
I1109 03:38:56.714820  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0
I1109 03:38:56.714857  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0
I1109 03:38:56.714998  106211 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0", node "node1"
I1109 03:38:56.715020  106211 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0", node "node1": all PVCs bound and nothing to do
I1109 03:38:56.715027  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.159833ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.715060  106211 factory.go:733] Attempting to bind rpod-0 to node1
I1109 03:38:56.716829  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0/binding: (1.494988ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.717033  106211 scheduler.go:572] pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-0 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I1109 03:38:56.719361  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.512031ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.724906  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.08105ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.725935  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1
I1109 03:38:56.725956  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1
I1109 03:38:56.726145  106211 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1", node "node1"
I1109 03:38:56.726167  106211 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1", node "node1": all PVCs bound and nothing to do
I1109 03:38:56.726211  106211 factory.go:733] Attempting to bind rpod-1 to node1
I1109 03:38:56.727994  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1/binding: (1.521106ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.728174  106211 scheduler.go:572] pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/rpod-1 is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
I1109 03:38:56.730492  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.009725ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
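
rpod-0 and rpod-1 above trace the happy path: AssumePodVolumes reports all PVCs bound, the scheduler assumes the pod onto node1, and the POST .../pods/<name>/binding persists the decision. A sketch of just the bind step, assuming client-go of this vintage, where the pods client exposes Bind without a context argument (the assume step in the scheduler cache and error recovery are omitted):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bindPodToNode issues the POST .../pods/<name>/binding seen above for
// rpod-0 and rpod-1: a Binding object naming the pod and the target node.
func bindPodToNode(cs kubernetes.Interface, ns, podName, nodeName string) error {
	binding := &v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: podName},
		Target:     v1.ObjectReference{Kind: "Node", Name: nodeName},
	}
	return cs.CoreV1().Pods(ns).Bind(binding)
}
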
I1109 03:38:56.828299  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-0: (1.637102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.930756  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (1.587032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.931055  106211 preemption_test.go:561] Creating the preemptor pod...
I1109 03:38:56.933788  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:56.933811  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:56.933817  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.522059ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.933921  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:56.933960  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:56.934018  106211 preemption_test.go:567] Creating additional pods...
I1109 03:38:56.938560  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.250863ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42354]
I1109 03:38:56.939045  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (3.681645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42352]
I1109 03:38:56.939238  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod/status: (4.617905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.939519  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (5.294008ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41876]
I1109 03:38:56.942441  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.002268ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42352]
I1109 03:38:56.942928  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (3.137944ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.943146  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:56.945235  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod/status: (1.751824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.945679  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.611262ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42352]
I1109 03:38:56.953128  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/rpod-1: (6.027722ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.954373  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:56.954390  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:56.954522  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:56.954555  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:56.957596  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0/status: (2.498043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42354]
I1109 03:38:56.958141  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (2.16009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42356]
I1109 03:38:56.958838  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.101952ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.961178  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.589274ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:41872]
I1109 03:38:56.961202  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (14.06439ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42352]
I1109 03:38:56.961354  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.754557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42354]
I1109 03:38:56.966593  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
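
Each "Unable to schedule ... no fit" line above is immediately followed by factory.go:742 persisting a (PodScheduled==False, Reason=Unschedulable) condition, which surfaces as the PUT .../pods/<name>/status requests interleaved through this section. A hedged sketch of that status write, using the UpdatePodCondition helper from k8s.io/kubernetes/pkg/api/v1/pod (the function name markUnschedulable is illustrative; the real code path goes through a shared pod condition updater):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// markUnschedulable records the (PodScheduled==False, Reason=Unschedulable)
// condition on the pod's status and persists it; UpdatePodCondition returns
// false when the condition is already up to date, avoiding a no-op PUT.
func markUnschedulable(cs kubernetes.Interface, pod *v1.Pod, message string) error {
	cond := &v1.PodCondition{
		Type:    v1.PodScheduled,
		Status:  v1.ConditionFalse,
		Reason:  v1.PodReasonUnschedulable, // "Unschedulable"
		Message: message,
	}
	if podutil.UpdatePodCondition(&pod.Status, cond) {
		_, err := cs.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
		return err
	}
	return nil
}
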
I1109 03:38:56.966777  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:56.966796  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:56.966922  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:56.966972  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:56.968874  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (1.133801ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42358]
I1109 03:38:56.969187  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.433777ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42352]
I1109 03:38:56.969958  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1/status: (2.200776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42356]
I1109 03:38:56.970228  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.933146ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42360]
I1109 03:38:56.973185  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (2.56681ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42356]
I1109 03:38:56.973437  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.093473ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42352]
I1109 03:38:56.974179  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:56.976657  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.439093ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42352]
I1109 03:38:56.977915  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:56.977932  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:56.978043  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:56.978077  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:56.984114  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2/status: (4.89757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42358]
I1109 03:38:56.984491  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (4.480586ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:56.984877  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (6.166563ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42356]
I1109 03:38:56.985641  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.982023ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42364]
I1109 03:38:56.988875  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.50629ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42356]
I1109 03:38:56.989852  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.464717ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:56.990071  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:56.990528  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:56.990546  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:56.990686  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:56.990727  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:56.992058  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.813227ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42356]
I1109 03:38:56.999954  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (7.546561ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42368]
I1109 03:38:57.000401  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (9.066588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42364]
I1109 03:38:57.000796  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3/status: (9.243105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.002147  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (9.802988ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42356]
I1109 03:38:57.005553  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.680592ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42368]
I1109 03:38:57.005753  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (3.091829ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42364]
I1109 03:38:57.006605  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.006810  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:57.006823  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:57.006911  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.006945  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.009799  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.052545ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42372]
I1109 03:38:57.010542  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (2.141656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.010633  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.724522ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42356]
I1109 03:38:57.012636  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4/status: (3.902544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42364]
I1109 03:38:57.015981  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.932655ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.018689  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.08117ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.021688  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.593545ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.022422  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (7.479118ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42364]
I1109 03:38:57.022776  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
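
The recurring generic_scheduler.go:1118 line marks node1 as a preemption candidate after each failed fit: a node stays in the candidate set unless its predicate failures are ones that evicting pods cannot fix (resource shortages like the Insufficient cpu/memory above qualify; node-selector or taint mismatches do not). A toy version of that filter; potentialNodes and the unresolvable callback are illustrative names, not the scheduler's own API:

package sketch

import (
	v1 "k8s.io/api/core/v1"
)

// potentialNodes keeps only nodes where removing pods could make the
// pending pod fit, the set the "potential node for preemption" lines
// report one member of on this single-node cluster.
func potentialNodes(nodes []*v1.Node, unresolvable func(*v1.Node) bool) []*v1.Node {
	var out []*v1.Node
	for _, n := range nodes {
		if unresolvable(n) {
			continue // e.g. node selector mismatch: preemption cannot help
		}
		out = append(out, n)
	}
	return out
}
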
I1109 03:38:57.023117  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:57.023172  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:57.023299  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.023369  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.053863  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.463385ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42374]
I1109 03:38:57.055120  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (32.236362ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.057490  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5/status: (4.946551ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42364]
I1109 03:38:57.057929  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (5.793753ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42372]
I1109 03:38:57.066090  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (6.923486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42372]
I1109 03:38:57.066759  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (11.169489ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.067072  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.068009  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:57.068024  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:57.068144  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.068179  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.075594  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6/status: (6.16696ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42374]
I1109 03:38:57.076430  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (7.348973ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42380]
I1109 03:38:57.077522  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (10.275462ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.079689  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (9.34851ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42382]
I1109 03:38:57.091099  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (12.572721ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42374]
I1109 03:38:57.091393  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.091548  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:57.091567  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:57.091653  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.091697  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.091755  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (12.829558ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.095956  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7/status: (2.033755ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42380]
I1109 03:38:57.096785  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.853419ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42382]
I1109 03:38:57.097988  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.343507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42380]
I1109 03:38:57.098105  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.946856ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42388]
I1109 03:38:57.098219  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.098509  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:57.098535  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:57.098617  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.098648  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.103039  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (3.982935ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42382]
I1109 03:38:57.105347  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-2.15d56166551835ed: (5.805878ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42390]
I1109 03:38:57.106018  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (6.856404ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42380]
I1109 03:38:57.106543  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
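
Note the PATCH to events/ppod-2.15d56166551835ed above: when ppod-2 fails scheduling a second time, client-go's event correlator recognizes the duplicate FailedScheduling event and bumps the count on the existing Event object via PATCH instead of POSTing a new one. On the scheduler side this is just a repeated recorder call, as in the sketch below (the recorder wiring via an EventBroadcaster is assumed to exist already):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

// emitFailedScheduling shows the recorder call whose repeats surface as
// the event PATCH above; deduplication happens inside client-go's event
// correlator, not in the caller.
func emitFailedScheduling(recorder record.EventRecorder, pod *v1.Pod, msg string) {
	recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "%s", msg)
}
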
I1109 03:38:57.106719  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:57.106747  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:57.106880  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.106916  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.110643  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.07403ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42394]
I1109 03:38:57.111146  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (2.657945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42382]
I1109 03:38:57.112466  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8/status: (3.920229ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42390]
I1109 03:38:57.115926  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.053262ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42362]
I1109 03:38:57.116609  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (3.693831ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42382]
I1109 03:38:57.116840  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.117631  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:57.117683  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:57.117838  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.117903  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.122382  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (3.498141ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42394]
I1109 03:38:57.123142  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9/status: (2.475637ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42396]
I1109 03:38:57.123718  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (5.086851ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42382]
I1109 03:38:57.125401  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.459649ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42398]
I1109 03:38:57.125535  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (1.363965ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42396]
I1109 03:38:57.125944  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.459159ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42382]
I1109 03:38:57.126239  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.126431  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:57.126448  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:57.126565  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.126614  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.128779  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.869919ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42394]
I1109 03:38:57.129386  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (1.65253ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42400]
I1109 03:38:57.129806  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10/status: (2.970395ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42398]
I1109 03:38:57.131312  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.656507ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42402]
I1109 03:38:57.131320  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (1.08926ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42400]
I1109 03:38:57.131732  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.131929  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:57.131955  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:57.132058  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.132096  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.137837  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-3.15d5616655d92fc0: (3.506412ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.137881  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (5.155103ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42394]
I1109 03:38:57.138109  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.138410  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (5.370712ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42404]
I1109 03:38:57.138844  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:57.138858  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:57.138954  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.139066  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.140459  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (8.624935ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42402]
I1109 03:38:57.140740  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (1.180754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.141189  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11/status: (1.542966ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42394]
I1109 03:38:57.142751  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.229677ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42408]
I1109 03:38:57.143009  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.419132ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.143881  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (2.302456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42402]
I1109 03:38:57.144106  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.144449  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:57.144464  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:57.144552  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.144584  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.146162  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.723745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.150196  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.142677ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.150632  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.274264ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42412]
I1109 03:38:57.150816  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12/status: (5.396293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42402]
I1109 03:38:57.152317  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (1.106852ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.152553  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (7.376792ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42408]
I1109 03:38:57.152602  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.152769  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:57.152781  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:57.152871  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.152903  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.153448  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.032346ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42410]
I1109 03:38:57.155197  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (1.632986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.155616  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.008322ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42414]
I1109 03:38:57.155768  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.461478ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42410]
I1109 03:38:57.157029  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13/status: (2.854479ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42408]
I1109 03:38:57.157953  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.786398ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42414]
I1109 03:38:57.159817  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (2.483732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42408]
I1109 03:38:57.159873  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.473642ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42414]
I1109 03:38:57.160020  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.160466  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:57.160484  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:57.160582  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.160616  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.163602  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.294444ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42418]
I1109 03:38:57.163680  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.171885ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42408]
I1109 03:38:57.164038  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (2.742846ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42416]
I1109 03:38:57.164467  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14/status: (3.282872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.166237  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (1.342341ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.166429  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.365745ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42408]
I1109 03:38:57.166472  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.166666  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:57.166686  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:57.166763  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.166807  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.168492  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.654334ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.169552  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15/status: (1.978603ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42418]
I1109 03:38:57.170226  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.497528ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.170845  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (3.220711ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42420]
I1109 03:38:57.170875  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.829536ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42424]
I1109 03:38:57.171851  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.95431ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42422]
I1109 03:38:57.172076  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.172314  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:57.172357  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:57.173153  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.923773ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42406]
I1109 03:38:57.173313  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.173354  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.174740  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (971.736µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.176477  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.631792ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42428]
I1109 03:38:57.178367  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (4.737977ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42418]
I1109 03:38:57.180657  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.827119ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42428]
I1109 03:38:57.181852  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16/status: (8.199462ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42422]
I1109 03:38:57.183299  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.250528ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42428]
I1109 03:38:57.184532  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (2.085378ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42422]
I1109 03:38:57.185187  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.186079  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:57.186112  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:57.186221  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.186322  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.189419  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (2.913079ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42422]
I1109 03:38:57.189855  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (5.183095ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42428]
I1109 03:38:57.190351  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.190535  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:57.190572  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:57.190752  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.190828  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.192827  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (5.716473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.194623  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (3.246987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42434]
I1109 03:38:57.195447  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-5.15d5616657cb461c: (8.023254ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42432]
I1109 03:38:57.196185  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (5.747335ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42422]
I1109 03:38:57.196527  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17/status: (5.47145ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42428]
I1109 03:38:57.201550  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.988672ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42434]
I1109 03:38:57.201892  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (4.919997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42428]
I1109 03:38:57.202237  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.203081  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:57.203960  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:57.203928  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (3.247993ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42422]
I1109 03:38:57.206689  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.08848ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42434]
I1109 03:38:57.207161  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.208595  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.212808  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (3.867724ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.214134  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (6.852985ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42434]
I1109 03:38:57.214976  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18/status: (2.480092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42438]
I1109 03:38:57.215911  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.088205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.216595  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.927556ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42434]
I1109 03:38:57.217413  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (2.006607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42438]
I1109 03:38:57.217695  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.217928  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:57.217944  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:57.218070  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.218119  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.218912  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (1.846372ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.219470  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (1.141417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.221561  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (3.144678ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42438]
I1109 03:38:57.221916  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.223365  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:57.223420  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:57.223594  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.223757  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.224294  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-6.15d561665a770dce: (3.667109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.225704  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.68498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42438]
I1109 03:38:57.226198  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.377376ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.228174  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19/status: (1.979284ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42438]
I1109 03:38:57.230085  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.444993ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.230563  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.230962  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:57.231054  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:57.231224  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.231314  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.233886  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (2.255508ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.234338  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (2.412981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.234559  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.234745  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:57.234767  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:57.234974  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.235045  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.236648  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-7.15d561665bdde6e1: (4.364945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42440]
I1109 03:38:57.238909  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.266007ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.240192  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20/status: (2.437516ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.240955  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.861432ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42440]
I1109 03:38:57.241754  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.144334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42426]
I1109 03:38:57.242027  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.242189  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:57.242208  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:57.242371  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.242527  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.244239  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.351164ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.245643  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21/status: (2.183001ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42444]
I1109 03:38:57.245680  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (1.27953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42440]
I1109 03:38:57.248737  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (11.485303ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42442]
I1109 03:38:57.250501  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (4.388289ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42444]
I1109 03:38:57.250778  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.251067  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:57.251091  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:57.251186  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.251226  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.252126  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (2.987471ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42442]
I1109 03:38:57.253991  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.546837ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42446]
I1109 03:38:57.257843  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods: (5.329142ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42442]
I1109 03:38:57.258197  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (6.060929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.259044  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22/status: (6.788388ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42444]
I1109 03:38:57.262576  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (1.369559ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42444]
I1109 03:38:57.262950  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.263135  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:57.263153  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:57.263264  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.263313  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.264936  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (1.37207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.265643  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23/status: (1.927918ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42446]
I1109 03:38:57.266577  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.086938ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42448]
I1109 03:38:57.267153  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (1.187465ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42446]
I1109 03:38:57.267477  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.267768  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:57.267785  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:57.267890  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.267924  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.269383  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.215628ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.270443  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.921011ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42450]
I1109 03:38:57.271224  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24/status: (3.061438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42448]
I1109 03:38:57.273157  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.481417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42450]
I1109 03:38:57.273431  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.273622  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:57.273641  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:57.273726  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.273766  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.277021  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.955258ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.277235  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25/status: (3.233084ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42450]
I1109 03:38:57.278564  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.690437ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42452]
I1109 03:38:57.279059  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.316664ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42450]
I1109 03:38:57.279435  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.279629  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:57.279672  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:57.279805  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.279870  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.282304  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (1.990119ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.284591  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.149619ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42454]
I1109 03:38:57.285097  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26/status: (2.566567ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42452]
I1109 03:38:57.287227  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (1.303563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42454]
I1109 03:38:57.287523  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.287757  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:57.287773  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:57.287918  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.287968  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.290624  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27/status: (2.373195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42454]
I1109 03:38:57.290652  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.554398ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.291207  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.530303ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42456]
I1109 03:38:57.292993  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.086623ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42436]
I1109 03:38:57.293700  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.294035  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:57.294111  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:57.294343  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.294417  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.295804  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (1.17952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42456]
I1109 03:38:57.296214  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
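By this point the queue has wrapped around: ppod-12, rejected earlier at 03:38:57.144552, is retried and rejected again, and node1 is once more reported as a potential node for preemption. The generic_scheduler.go:1118 lines come from the preemption pass, which asks whether evicting the node's lower-priority pods would free enough room for the pending pod. A self-contained Go sketch of that test — hypothetical names and sizes, not the real implementation, which among other things also scores victims and tries to respect PodDisruptionBudgets — is:

package main

import "fmt"

type pod struct {
	name     string
	priority int32
	milliCPU int64
	memory   int64
}

// potentialNodeForPreemption mirrors, in spirit, the check behind the
// "potential node for preemption" lines: if evicting every pod with lower
// priority than the preemptor would free enough resources for it to fit,
// the node is a candidate.
func potentialNodeForPreemption(preemptor pod, running []pod, allocCPU, allocMem int64) bool {
	var usedCPU, usedMem int64
	for _, p := range running {
		if p.priority >= preemptor.priority {
			// Higher- or equal-priority pods are never preempted,
			// so their usage stays on the node.
			usedCPU += p.milliCPU
			usedMem += p.memory
		}
	}
	return usedCPU+preemptor.milliCPU <= allocCPU &&
		usedMem+preemptor.memory <= allocMem
}

func main() {
	running := []pod{
		{"ppod-0", 0, 400, 128 << 20},
		{"ppod-1", 0, 500, 256 << 20},
	}
	preemptor := pod{"preemptor-pod", 100, 600, 256 << 20}
	// A node sized like the test's single node: too small for the
	// preemptor unless the low-priority ppods are evicted.
	fmt.Println(potentialNodeForPreemption(preemptor, running, 1000, 512<<20))
}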
I1109 03:38:57.296680  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:57.296806  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:57.297278  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.297316  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.301481  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (6.384083ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42454]
I1109 03:38:57.302130  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (4.174874ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42456]
I1109 03:38:57.302795  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-12.15d561665f04e8e9: (7.072709ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42458]
I1109 03:38:57.305186  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28/status: (6.57363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42460]
I1109 03:38:57.305712  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.913112ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42456]
I1109 03:38:57.307173  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.493327ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42460]
I1109 03:38:57.307542  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.307858  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:57.307909  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:57.308042  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.308122  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.312661  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.8932ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42454]
I1109 03:38:57.314844  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (1.952188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42464]
I1109 03:38:57.314865  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29/status: (5.733541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42456]
I1109 03:38:57.317654  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (1.422255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42464]
I1109 03:38:57.318025  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.318280  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:57.318324  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:57.318548  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.318628  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.322790  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.286868ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42454]
I1109 03:38:57.323148  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30/status: (2.484131ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42466]
I1109 03:38:57.323967  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (5.075448ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42464]
I1109 03:38:57.325468  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.925148ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42466]
I1109 03:38:57.325852  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.326186  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:57.326206  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:57.326318  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.326350  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.327786  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.16069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42454]
I1109 03:38:57.328236  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.361021ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42468]
I1109 03:38:57.329662  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31/status: (3.08993ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42464]
I1109 03:38:57.331810  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.407589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42468]
I1109 03:38:57.332033  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.332196  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:57.332213  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:57.332409  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.332454  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.334975  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32/status: (2.301495ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42468]
I1109 03:38:57.334983  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (2.059457ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42454]
I1109 03:38:57.337129  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.803924ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42468]
I1109 03:38:57.337278  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.756497ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42454]
I1109 03:38:57.337634  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.337953  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:57.337976  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:57.338106  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.338159  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.340550  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.305344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42468]
I1109 03:38:57.341312  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33/status: (2.348604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42470]
I1109 03:38:57.343587  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.833534ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.344330  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (2.50506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42470]
I1109 03:38:57.344629  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.344903  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:57.344923  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:57.345050  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.345113  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.347313  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.3166ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42474]
I1109 03:38:57.348170  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (2.589456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.349408  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34/status: (3.918194ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42468]
I1109 03:38:57.352282  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (1.426205ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.352550  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.352753  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:57.352780  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:57.352880  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.352920  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.354791  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.35839ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42474]
I1109 03:38:57.354814  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.375605ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.355210  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.357157  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:57.357181  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:57.357208  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-15.15d561666058050a: (3.153204ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42476]
I1109 03:38:57.357278  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.357320  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.358881  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (1.328283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.368375  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35/status: (10.027765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42478]
I1109 03:38:57.371186  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (1.505855ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42478]
I1109 03:38:57.371589  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.371771  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:57.371785  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:57.371861  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.371893  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.375554  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (3.02811ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42480]
I1109 03:38:57.377900  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36/status: (4.967927ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42478]
I1109 03:38:57.381421  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (13.828758ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.381476  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (3.076361ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42478]
I1109 03:38:57.381734  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
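Amid the churn, the GET of .../pods/preemptor-pod at 03:38:57.381421 is the test harness polling for the high-priority preemptor while the low-priority ppods keep the node saturated. A hedged sketch of that polling pattern with release-1.14-era client-go (where Pods(ns).Get still takes no context argument); the kubeconfig path is hypothetical, since the integration test wires its client to an in-process apiserver instead:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig; the real test builds its client differently.
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Namespace names like the one in this log are generated per test run.
	ns := "preemption-race7260b83a-02a2-11ea-825f-0242ac110002"
	err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get("preemptor-pod", metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Done once the scheduler has preempted victims and bound the pod.
		return pod.Spec.NodeName != "", nil
	})
	fmt.Println("preemptor scheduled:", err == nil)
}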
I1109 03:38:57.381939  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:57.381973  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:57.382088  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.382145  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.383589  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (1.109176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.386541  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37/status: (3.566302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42480]
I1109 03:38:57.386730  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (28.78517ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42474]
I1109 03:38:57.388154  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (1.280311ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42480]
I1109 03:38:57.388416  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.388626  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:57.388649  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:57.388762  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.388806  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.389275  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.018757ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42474]
I1109 03:38:57.390656  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38/status: (1.627977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42480]
I1109 03:38:57.392165  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.549624ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42474]
I1109 03:38:57.392883  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (3.855712ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.393587  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (2.512082ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42480]
I1109 03:38:57.393957  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.394129  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:57.394155  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:57.394278  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.394318  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.394465  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.777972ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42474]
I1109 03:38:57.397047  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.261268ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.397623  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39/status: (3.019549ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42480]
I1109 03:38:57.398544  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.698446ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42474]
I1109 03:38:57.399174  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.21422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42480]
I1109 03:38:57.399492  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.399745  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:57.399769  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:57.399898  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.399937  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.402731  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (2.229525ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.403470  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40/status: (2.791523ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42474]
I1109 03:38:57.406704  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (6.160239ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42484]
I1109 03:38:57.406938  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (2.143615ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42474]
I1109 03:38:57.407292  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.407703  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:57.407730  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:57.407872  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.407932  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.409958  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.719952ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.411027  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.27991ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42486]
I1109 03:38:57.411911  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41/status: (3.566038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42484]
I1109 03:38:57.414367  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.657812ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42486]
I1109 03:38:57.415093  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.415438  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:57.415469  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:57.415579  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.415628  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.418697  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.293708ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42488]
I1109 03:38:57.419032  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (2.523164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.419672  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42/status: (3.717375ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42486]
I1109 03:38:57.421736  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (1.557078ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42488]
I1109 03:38:57.422078  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.422372  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:57.422392  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:57.422504  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.422547  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.425003  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.810366ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42490]
I1109 03:38:57.425343  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (2.526882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.425352  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43/status: (2.571192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42488]
I1109 03:38:57.427538  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (1.458255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42490]
I1109 03:38:57.427873  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.428095  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:57.428138  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:57.428303  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.428383  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.431456  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44/status: (2.684939ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42490]
I1109 03:38:57.431649  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (2.451699ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42492]
I1109 03:38:57.432183  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.447148ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.433500  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (1.44494ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42492]
I1109 03:38:57.433835  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.434045  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:57.434061  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:57.434272  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.434347  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.436881  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.893057ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.437335  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45/status: (2.459835ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.437720  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.909117ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42490]
I1109 03:38:57.439447  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.509794ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42472]
I1109 03:38:57.439940  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.440188  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:57.440229  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:57.440374  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.440452  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.443117  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.75157ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42496]
I1109 03:38:57.443571  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (2.454032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.443955  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46/status: (3.184381ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42490]
I1109 03:38:57.445737  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.224805ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.445967  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.446141  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:57.446160  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:57.446238  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.446299  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.448752  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (2.08938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42496]
I1109 03:38:57.449207  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (2.48191ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.449536  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.449726  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:57.449768  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:57.450993  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.451036  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.453275  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-21.15d5616664db4ecc: (4.830617ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42502]
I1109 03:38:57.454148  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (2.848047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.459025  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47/status: (7.375486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42496]
I1109 03:38:57.460289  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (6.624608ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42502]
I1109 03:38:57.461339  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (1.459723ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42496]
I1109 03:38:57.461592  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.461772  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:57.461786  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:57.461865  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.461895  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.464257  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.487892ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42504]
I1109 03:38:57.466381  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (4.177792ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.469856  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48/status: (5.072192ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42502]
I1109 03:38:57.494022  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (7.716509ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42502]
I1109 03:38:57.494644  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.494891  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:57.494911  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:57.495049  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.495088  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.498176  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.858339ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42510]
I1109 03:38:57.498745  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (11.640866ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.501377  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (4.095443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42504]
I1109 03:38:57.519827  106211 wrap.go:47] PUT /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49/status: (17.146655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42502]
I1109 03:38:57.523463  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (2.594244ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.523836  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.524032  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:57.524048  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:57.524180  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.524256  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.530025  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (4.572737ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.530321  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (5.796697ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42510]
I1109 03:38:57.530348  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.530506  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:57.530530  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27
I1109 03:38:57.530613  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.530684  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-27 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.532222  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.300777ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.534848  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-22.15d5616665601fe6: (9.748996ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42514]
I1109 03:38:57.535504  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (4.582164ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42510]
I1109 03:38:57.535766  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.535916  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:57.535929  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29
I1109 03:38:57.536020  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.536052  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-29 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.538551  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (2.189863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.538555  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (2.313039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42510]
I1109 03:38:57.538805  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.538965  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:57.538978  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30
I1109 03:38:57.539096  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.539151  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-30 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.541906  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (2.548255ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42510]
I1109 03:38:57.541939  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-27.15d561666790c296: (6.274854ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42514]
I1109 03:38:57.542348  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (2.873438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42494]
I1109 03:38:57.542388  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.542530  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:57.542547  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32
I1109 03:38:57.542626  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.542657  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-32 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.544344  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.520422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42514]
I1109 03:38:57.544479  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (1.299459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42536]
I1109 03:38:57.546706  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.545725  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-29.15d5616668c440e0: (3.116695ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42510]
I1109 03:38:57.549335  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:57.549390  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38
I1109 03:38:57.552904  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.552998  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-38 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.555102  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-30.15d56166696488ef: (6.970266ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42514]
I1109 03:38:57.570166  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (15.433845ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42548]
I1109 03:38:57.571540  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (17.58252ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42536]
I1109 03:38:57.571832  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.572135  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:57.572163  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42
I1109 03:38:57.572343  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.572409  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-42 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.577570  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-32.15d561666a3795fa: (21.606024ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42514]
I1109 03:38:57.580810  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (4.298527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42536]
I1109 03:38:57.581071  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.581474  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (4.367832ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42548]
I1109 03:38:57.581966  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:57.581978  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44
I1109 03:38:57.582072  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.582104  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-44 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.585265  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (2.559472ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42514]
I1109 03:38:57.585487  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.586432  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:57.586489  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45
I1109 03:38:57.586610  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.586694  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-45 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.586900  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-38.15d561666d937229: (4.168542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42536]
I1109 03:38:57.587464  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.085877ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.587926  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (2.281795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42514]
I1109 03:38:57.589665  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.447183ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42536]
I1109 03:38:57.589949  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.590147  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.536868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42558]
I1109 03:38:57.590236  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:57.590314  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46
I1109 03:38:57.590490  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.590545  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-46 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.592136  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.265176ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42558]
I1109 03:38:57.593030  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (2.092327ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42560]
I1109 03:38:57.593356  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.593579  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:57.593595  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47
I1109 03:38:57.593691  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.593735  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-47 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.595147  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-42.15d561666f2cb81c: (7.086795ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42556]
I1109 03:38:57.597947  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (3.561364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.598473  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (4.446236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42514]
I1109 03:38:57.599649  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.603193  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-44.15d561666fef43f9: (7.424019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42556]
I1109 03:38:57.607180  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-45.15d56166704a4cef: (3.336018ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.618475  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:57.620224  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:57.620321  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:57.620372  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:57.625791  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
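
Note: the burst of reflector.go:235 "forcing resync" lines comes from the shared informers started for the test apiserver; on each resync period an informer replays its cache to registered handlers even when nothing changed. A small sketch of wiring such an informer, with an illustrative 30s period (the log does not state the actual period):

package sketch

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func startPodInformer(cs kubernetes.Interface, stop <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(cs, 30*time.Second)
	inf := factory.Core().V1().Pods().Informer()
	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		// On a forced resync, UpdateFunc fires for every cached pod
		// even when the object is unchanged.
		UpdateFunc: func(oldObj, newObj interface{}) {},
	})
	factory.Start(stop)
	cache.WaitForCacheSync(stop, inf.HasSynced)
}
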
I1109 03:38:57.626203  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:57.626216  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:57.626380  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.626417  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.628517  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.668917ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42562]
I1109 03:38:57.628830  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.629047  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:57.629090  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:57.629269  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.629307  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.632864  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-46.15d5616670a77d0c: (25.116532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.634894  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (4.694426ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42564]
I1109 03:38:57.635704  106211 backoff_utils.go:79] Backing off 4s
I1109 03:38:57.637065  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (9.678958ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42514]
I1109 03:38:57.637558  106211 backoff_utils.go:79] Backing off 4s
I1109 03:38:57.637642  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (7.727014ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42562]
I1109 03:38:57.638001  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
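
Note: the backoff_utils.go:79 entries record the per-pod backoff applied before an unschedulable pod is retried; the 2s/4s values seen in this log are consistent with a doubling backoff. A toy version follows; the initial duration and cap are assumptions, not values taken from the log:

package sketch

import "time"

// podBackoff doubles the wait after every failed scheduling attempt,
// up to a cap; the 1s start and 10s cap are illustrative.
type podBackoff struct {
	cur, max time.Duration
}

func (b *podBackoff) next() time.Duration {
	if b.cur == 0 {
		b.cur = time.Second
	}
	if b.max == 0 {
		b.max = 10 * time.Second
	}
	d := b.cur
	b.cur *= 2
	if b.cur > b.max {
		b.cur = b.max
	}
	return d
}
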
I1109 03:38:57.638334  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:57.638383  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:57.646596  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.646713  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.649102  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (1.459934ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.649508  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (2.543804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42564]
I1109 03:38:57.650133  106211 backoff_utils.go:79] Backing off 4s
I1109 03:38:57.650196  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.651846  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:57.651864  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:57.652003  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.652048  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.653532  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.281123ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42564]
I1109 03:38:57.653789  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.653942  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:57.653955  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:57.654056  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.654088  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.654371  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (1.600468ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.656021  106211 backoff_utils.go:79] Backing off 4s
I1109 03:38:57.667336  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (11.258047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42566]
I1109 03:38:57.667643  106211 backoff_utils.go:79] Backing off 4s
I1109 03:38:57.667673  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (11.581073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42564]
I1109 03:38:57.667930  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.668285  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:57.668327  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:57.669969  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:57.670021  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:57.672552  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (2.157307ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42564]
I1109 03:38:57.672649  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-47.15d56166714902d8: (6.070715ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.672838  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:57.674848  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (1.228308ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42566]
I1109 03:38:57.675159  106211 backoff_utils.go:79] Backing off 4s
I1109 03:38:57.684387  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.435655ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42564]
I1109 03:38:57.699780  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-0.15d5616653b14fdc: (22.35614ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.717442  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-1.15d56166546ebe81: (14.152272ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.726180  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-4.15d5616656d0b58a: (6.251344ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.729987  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-2.15d56166551835ed: (2.560246ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.739584  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-3.15d5616655d92fc0: (8.810491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.745294  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-6.15d561665a770dce: (3.44223ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.785049  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.8752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.891405  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (9.292031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:57.984547  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.429336ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:58.083839  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.649293ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:58.183817  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.746702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:58.283682  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.677545ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:58.383665  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.642834ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:58.484708  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (2.631159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
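
Note: the evenly spaced GETs of preemptor-pod (roughly every 100ms) are the test harness polling for the preemptor to be scheduled. A sketch of such a wait, assuming a 1.14-era client-go Get signature; the 30s timeout is an illustrative choice:

package sketch

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForScheduled polls until the pod has been assigned a node,
// producing GET traffic like the preemptor-pod entries above.
func waitForScheduled(cs kubernetes.Interface, ns, name string) error {
	return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		p, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return p.Spec.NodeName != "", nil
	})
}
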
I1109 03:38:58.520748  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:58.520782  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod
I1109 03:38:58.520964  106211 scheduler_binder.go:269] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod", node "node1"
I1109 03:38:58.520979  106211 scheduler_binder.go:279] AssumePodVolumes for pod "preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod", node "node1": all PVCs bound and nothing to do
I1109 03:38:58.521031  106211 factory.go:733] Attempting to bind preemptor-pod to node1
I1109 03:38:58.521446  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:58.521465  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:58.521582  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.521620  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.526165  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (3.470854ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42564]
I1109 03:38:58.526631  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (3.446045ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42590]
I1109 03:38:58.526989  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod/binding: (4.266995ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:58.527208  106211 scheduler.go:572] pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/preemptor-pod is bound successfully on node node1, 1 nodes evaluated, 1 nodes were found feasible
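
Note: the POST .../pods/preemptor-pod/binding followed by "bound successfully" is the scheduler's bind step: it creates a v1.Binding against the pod's binding subresource. A minimal sketch using the 1.14-era client-go Bind helper, with the pod and node names taken from the log:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bindPod issues the POST /pods/<name>/binding seen above, assigning
// the pod (e.g. "preemptor-pod") to the chosen node (e.g. "node1").
func bindPod(cs kubernetes.Interface, ns, pod, node string) error {
	return cs.CoreV1().Pods(ns).Bind(&v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pod},
		Target:     v1.ObjectReference{Kind: "Node", Name: node},
	})
}
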
I1109 03:38:58.527349  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.527467  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.527633  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:58.527646  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:58.527735  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.527766  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.530359  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (2.376334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42590]
I1109 03:38:58.530766  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (2.754922ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:58.531063  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.531204  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.531493  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:58.531515  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:58.531604  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.531643  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.533033  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-8.15d561665cc6270b: (10.32842ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42592]
I1109 03:38:58.535102  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.503723ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42592]
I1109 03:38:58.536934  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (5.029725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:58.537236  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.537642  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (5.822281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42590]
I1109 03:38:58.537917  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.538141  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:58.538165  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:58.538313  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.538353  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.540670  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (1.312644ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42594]
I1109 03:38:58.541501  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.542105  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (3.403597ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42592]
I1109 03:38:58.542589  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-9.15d561665d6dbd3c: (4.254174ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42552]
I1109 03:38:58.542840  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.542985  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:58.543005  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:58.543075  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.543117  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.545994  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (1.353797ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42596]
I1109 03:38:58.546205  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.546648  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (2.661127ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42594]
I1109 03:38:58.546857  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.546957  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:58.546974  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:58.547054  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.547097  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.548377  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-10.15d561665df2ad6e: (4.375673ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42592]
I1109 03:38:58.548595  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (1.223546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42594]
I1109 03:38:58.548834  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.548987  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:58.548999  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:58.549075  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.549107  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.551483  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (3.742592ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42596]
I1109 03:38:58.551766  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.552557  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-11.15d561665eb0a628: (3.679116ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42594]
I1109 03:38:58.552657  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (2.307174ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42592]
I1109 03:38:58.552971  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (2.5588ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42598]
I1109 03:38:58.553532  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.553599  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.554787  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:58.554830  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:58.554933  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.554992  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.556716  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (1.50448ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42596]
I1109 03:38:58.557068  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-13.15d561665f83d8ea: (2.769908ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42592]
I1109 03:38:58.557427  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (1.420159ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.557663  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.557801  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:58.557821  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:58.557893  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.557929  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.559109  106211 backoff_utils.go:79] Backing off 4s
I1109 03:38:58.559885  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.509594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.559913  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (1.12872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42602]
I1109 03:38:58.560238  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.560316  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.560414  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:58.560430  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:58.560524  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.560566  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.562658  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (1.348634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42596]
I1109 03:38:58.562745  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (2.024443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.562932  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.562968  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.563134  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:58.563175  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:58.563275  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.563329  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.564521  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-14.15d561665ff9854f: (6.875916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42592]
I1109 03:38:58.565593  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (2.002957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42596]
I1109 03:38:58.566061  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.566304  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (2.814098ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.566586  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.566738  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:58.566757  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:58.566846  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.566884  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.568690  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-16.15d5616660bbe9fb: (2.437328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42592]
I1109 03:38:58.568731  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (1.637585ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42596]
I1109 03:38:58.569096  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (2.007819ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.570322  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.570523  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.572366  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-5.15d5616657cb461c: (2.621378ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42592]
I1109 03:38:58.573881  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:58.573909  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:58.574049  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.574105  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.576136  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.369108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.576206  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.706593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42596]
I1109 03:38:58.576479  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.576707  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:58.576752  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23
I1109 03:38:58.576836  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.576936  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.576972  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-23 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.578076  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-17.15d5616661c6825a: (5.138674ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.579858  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (2.731019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42596]
I1109 03:38:58.580238  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (3.032739ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.580512  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.581283  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.581462  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:58.581508  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24
I1109 03:38:58.581701  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.581771  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-24 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.582371  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-18.15d5616662d5776d: (3.404611ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.583636  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/preemptor-pod: (1.723888ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42596]
I1109 03:38:58.583757  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.304753ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.583791  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.405926ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.584217  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.584263  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.584667  106211 preemption_test.go:583] Check unschedulable pods still exists and were never scheduled...
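
preemption_test.go:583 marks the start of the test's verification phase: the burst of GET /pods/ppod-N requests that follows is the test re-fetching each low-priority pod and asserting it still exists and was never bound to a node. A stand-in sketch of that loop; getPod here is hypothetical shorthand for those GET calls:

package main

import "fmt"

type podStatus struct {
	exists   bool
	nodeName string // empty while the pod is unscheduled
}

// getPod stands in for a GET against the apiserver, as seen in the log.
func getPod(name string) podStatus {
	return podStatus{exists: true, nodeName: ""}
}

func main() {
	for i := 0; i < 50; i++ {
		name := fmt.Sprintf("ppod-%d", i)
		st := getPod(name)
		if !st.exists || st.nodeName != "" {
			fmt.Printf("FAIL: %s was scheduled or deleted\n", name)
			return
		}
	}
	fmt.Println("all low-priority pods still pending")
}
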
I1109 03:38:58.584849  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:58.584871  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25
I1109 03:38:58.585032  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.585075  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-25 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.585521  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-19.15d5616663bce40c: (2.370451ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.585858  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (1.040152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.586328  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.121705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.586638  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.586816  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:58.586830  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26
I1109 03:38:58.586936  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.586968  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-26 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.587451  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (1.201109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.587743  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.06785ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.588008  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.589184  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-7.15d561665bdde6e1: (3.102167ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.590753  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (2.851619ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.591175  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (3.738938ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42610]
I1109 03:38:58.592680  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.592875  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-20.15d5616664693df3: (3.160772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42600]
I1109 03:38:58.592885  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:58.592906  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:58.592989  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.593026  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.594681  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (7.034072ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42612]
I1109 03:38:58.595284  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (3.246095ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.595446  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.595587  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (2.260102ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42610]
I1109 03:38:58.596279  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-23.15d5616666188ce5: (2.621732ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42614]
I1109 03:38:58.596828  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.597313  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:58.597550  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28
I1109 03:38:58.597882  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.598131  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-28 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.600024  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (6.706891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.600324  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.602495  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.766171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42610]
I1109 03:38:58.602552  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (2.447106ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.602960  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.928182ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42612]
I1109 03:38:58.603335  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.603780  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-24.15d56166665ef1e8: (3.400827ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42614]
I1109 03:38:58.604651  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:58.604666  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31
I1109 03:38:58.604740  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.604773  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.604807  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-31 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.606067  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.111048ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.606395  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.606784  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:58.606798  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33
I1109 03:38:58.606894  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.606925  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-33 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.608517  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (2.352155ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.609068  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (2.603987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42616]
I1109 03:38:58.609317  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.610262  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (2.457544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42618]
I1109 03:38:58.610661  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (1.760454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.611032  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (3.315804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.611321  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.611436  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.611569  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:58.611586  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34
I1109 03:38:58.611681  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.611719  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-34 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.616097  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (3.774428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.618220  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.619033  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:58.621187  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:58.621271  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:58.621301  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
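
The "forcing resync" lines are the shared informers replaying their cached objects to registered handlers on a fixed period, independent of new watch events. A toy illustration of that periodic replay; the period and cache contents are made up:

package main

import (
	"fmt"
	"time"
)

func main() {
	cache := []string{"ppod-13", "ppod-14", "ppod-16"} // objects already in the informer cache
	resync := time.NewTicker(50 * time.Millisecond)    // stand-in for the resync period
	defer resync.Stop()
	for i := 0; i < 2; i++ {
		<-resync.C
		fmt.Println("forcing resync")
		for _, obj := range cache {
			// handlers would receive an Update for each cached object here
			fmt.Printf("  replaying update for %s\n", obj)
		}
	}
}
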
I1109 03:38:58.622078  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (2.520114ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.624169  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (1.434533ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.625704  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (13.403654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42616]
I1109 03:38:58.625910  106211 reflector.go:235] k8s.io/client-go/informers/factory.go:133: forcing resync
I1109 03:38:58.625938  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.626348  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:58.626366  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:58.626472  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.626504  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.628376  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (3.437449ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.628717  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.623241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42616]
I1109 03:38:58.630143  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.630326  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:58.630348  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35
I1109 03:38:58.630410  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (3.633785ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.630435  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.630466  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-35 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.630720  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.632015  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (1.391985ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.632326  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.632402  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (1.782729ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42616]
I1109 03:38:58.632711  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.632953  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:58.633003  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36
I1109 03:38:58.633135  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.633197  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-36 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.633975  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (1.245507ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.636557  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (3.152909ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.636783  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (3.053214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42620]
I1109 03:38:58.637018  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.637315  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.637461  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:58.637481  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37
I1109 03:38:58.637629  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.637673  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-37 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.639169  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-25.15d5616666b810c0: (33.949789ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42610]
I1109 03:38:58.639383  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (1.295301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42620]
I1109 03:38:58.639624  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (3.257324ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.639995  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (1.923125ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42604]
I1109 03:38:58.640019  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.640416  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.640584  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:58.640626  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39
I1109 03:38:58.640756  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.640896  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-39 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.642686  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.306946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42622]
I1109 03:38:58.642817  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-26.15d56166671530e7: (2.97304ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42610]
I1109 03:38:58.643014  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.643679  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (2.591266ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42620]
I1109 03:38:58.643926  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.644086  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:58.644119  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40
I1109 03:38:58.644199  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.644270  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-40 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.651270  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (6.654557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42620]
I1109 03:38:58.651469  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (6.959626ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42622]
I1109 03:38:58.651715  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (9.292583ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.652290  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.652607  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.652838  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:58.652915  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41
I1109 03:38:58.653112  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.653148  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-41 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.654777  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-12.15d561665f04e8e9: (11.33268ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42610]
I1109 03:38:58.655380  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.962707ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42622]
I1109 03:38:58.660049  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.660296  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:58.660347  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43
I1109 03:38:58.660483  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.660555  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-43 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.665095  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-28.15d56166681f6664: (3.277248ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42622]
I1109 03:38:58.669083  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (7.647357ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42626]
I1109 03:38:58.669526  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (8.143515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42610]
I1109 03:38:58.669822  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (7.754628ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.670148  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (8.294605ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.670382  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.670608  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.670964  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.673421  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:58.673490  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:58.673512  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (2.497906ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.673683  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.673771  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.676021  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (1.791069ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.676291  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.676672  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (1.478101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.677033  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (2.203096ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42626]
I1109 03:38:58.677312  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.677640  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:58.677652  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48
I1109 03:38:58.677751  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.677784  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-48 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.678089  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-31.15d5616669da7b67: (12.398349ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42622]
I1109 03:38:58.678534  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (1.215922ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.680541  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (1.98519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.680929  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (2.684697ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.680935  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.681152  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.681323  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (2.475515ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.681765  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:58.681782  106211 scheduler.go:453] Attempting to schedule pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49
I1109 03:38:58.681879  106211 factory.go:647] Unable to schedule preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49: no fit: 0/1 nodes are available: 1 Insufficient cpu, 1 Insufficient memory.; waiting
I1109 03:38:58.681909  106211 factory.go:742] Updating pod condition for preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-49 to (PodScheduled==False, Reason=Unschedulable)
I1109 03:38:58.684867  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (1.55108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.685296  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (3.14959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.685482  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (3.13351ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.685828  106211 backoff_utils.go:79] Backing off 2s
I1109 03:38:58.686932  106211 generic_scheduler.go:1118] Node node1 is a potential node for preemption.
I1109 03:38:58.686933  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (1.044022ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42606]
I1109 03:38:58.688385  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-33.15d561666a8e7826: (9.122984ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42622]
I1109 03:38:58.688457  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (1.08974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.691381  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (2.556752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.692226  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-34.15d561666af87342: (2.923244ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.700447  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-15.15d561666058050a: (7.552897ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.701659  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-22: (2.303266ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.704883  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-35.15d561666bb2ffe6: (3.776222ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.705151  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-23: (1.757464ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.706967  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-24: (1.21657ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.708715  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-36.15d561666c9161a4: (2.964174ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.709653  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-25: (1.802739ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.715319  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-37.15d561666d2dc7ea: (4.761916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.716396  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-26: (3.418216ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.718636  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-27: (1.652953ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.720175  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-39.15d561666de79138: (3.8285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.720761  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-28: (1.778772ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.722069  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-29: (996.952µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.723226  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-40.15d561666e3d42fe: (2.424569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.723986  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-30: (1.088876ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.725491  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-31: (1.023334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.726833  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-41.15d561666eb73a57: (2.837169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.731653  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-43.15d561666f964b48: (2.875977ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.731800  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-32: (5.593339ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.733526  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-33: (1.294498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.734697  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-21.15d5616664db4ecc: (2.248768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.737090  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-34: (2.461563ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.738760  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-35: (1.241946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.740424  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-36: (1.219651ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.742447  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-48.15d5616671eeb28d: (2.810572ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.743044  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-37: (2.092882ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.744749  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-38: (1.366987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.746748  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-39: (1.630745ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.747365  106211 wrap.go:47] PATCH /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events/ppod-49.15d5616673e925b1: (4.312043ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.752048  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-40: (4.85748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.754679  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-41: (1.645034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.756593  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-42: (1.216863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.758296  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-43: (1.158891ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.759867  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-44: (1.11161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.761509  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-45: (1.149428ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.763052  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-46: (1.123333ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.766755  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-47: (3.21945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.770100  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-48: (1.280916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.773902  106211 wrap.go:47] GET /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-49: (1.408807ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.776400  106211 preemption_test.go:598] Cleaning up all pods...
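
preemption_test.go:598 starts teardown: the test deletes each test pod in order, which produces the alternating DELETE and event POST lines below. A stand-in sketch of that loop; deletePod is hypothetical shorthand for the DELETE calls in the log:

package main

import "fmt"

// deletePod stands in for: DELETE /api/v1/namespaces/<ns>/pods/<name>
func deletePod(name string) error {
	return nil
}

func main() {
	for i := 0; i < 50; i++ {
		name := fmt.Sprintf("ppod-%d", i)
		if err := deletePod(name); err != nil {
			fmt.Printf("failed to delete %s: %v\n", name, err)
		}
	}
	fmt.Println("Cleaning up all pods... done")
}
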
I1109 03:38:58.782877  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:58.782970  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-0
I1109 03:38:58.785744  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-0: (8.20009ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.787304  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.847197ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
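
"Skip schedule deleting pod" shows the guard the scheduler applies before each attempt during this teardown: a pod whose deletionTimestamp is already set is being torn down, so binding it would only race with the delete. A minimal sketch of that guard:

package main

import (
	"fmt"
	"time"
)

type pod struct {
	name              string
	deletionTimestamp *time.Time // non-nil once a delete has been issued
}

// skipPodSchedule reports whether the scheduler should ignore this pod.
func skipPodSchedule(p pod) bool {
	return p.deletionTimestamp != nil
}

func main() {
	now := time.Now()
	p := pod{name: "ppod-0", deletionTimestamp: &now}
	if skipPodSchedule(p) {
		fmt.Printf("Skip schedule deleting pod: %s\n", p.name)
	}
}
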
I1109 03:38:58.793872  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:58.793920  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-1
I1109 03:38:58.795325  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-1: (8.184752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.797580  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.031833ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.802352  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:58.802386  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-2
I1109 03:38:58.803805  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-2: (7.265187ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.804557  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.870442ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.807863  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:58.807960  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-3
I1109 03:38:58.809531  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-3: (5.108865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.810131  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.901109ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.813151  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:58.813189  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-4
I1109 03:38:58.815459  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-4: (5.585631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.816494  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.024145ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.821133  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:58.821222  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-5
I1109 03:38:58.822866  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-5: (7.083657ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.823375  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.738052ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.826662  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:58.826712  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-6
I1109 03:38:58.828515  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (1.468359ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.829809  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-6: (5.789443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.833112  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:58.833145  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-7
I1109 03:38:58.837884  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.243248ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.840470  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-7: (10.267847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.844745  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:58.844788  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-8
I1109 03:38:58.846144  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-8: (5.202589ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.849725  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.589994ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.850338  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:58.850442  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-9
I1109 03:38:58.854005  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-9: (7.406452ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.866665  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (15.516875ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.867932  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:58.867963  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-10
I1109 03:38:58.870227  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-10: (15.442082ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.874402  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.808203ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.875308  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:58.875341  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-11
I1109 03:38:58.878929  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-11: (7.286298ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.879603  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.031493ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.882967  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:58.882994  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-12
I1109 03:38:58.886578  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.094799ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.887057  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-12: (7.118303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.892701  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:58.892746  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-13
I1109 03:38:58.895649  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-13: (8.067615ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.898488  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.450047ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.904266  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:58.904365  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-14
I1109 03:38:58.914764  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (9.73657ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.915613  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-14: (19.446125ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.922419  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:58.922464  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-15
I1109 03:38:58.924556  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-15: (8.378457ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.928215  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.397525ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.932599  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:58.932641  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-16
I1109 03:38:58.936913  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (4.004158ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.939938  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-16: (13.760501ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.944643  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:58.944711  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-17
I1109 03:38:58.951822  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-17: (11.426049ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.956323  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (11.318044ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.959232  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:58.960031  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-18
I1109 03:38:58.961691  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-18: (9.160608ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.962732  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (2.416546ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.965801  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:58.965833  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-19
I1109 03:38:58.969635  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.25899ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.977563  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-19: (15.509463ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:58.985372  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:58.985517  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-20
I1109 03:38:58.991851  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (5.943868ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:58.993957  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-20: (15.658547ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:59.004584  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:59.004641  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-21
I1109 03:38:59.007160  106211 wrap.go:47] DELETE /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/pods/ppod-21: (10.136276ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42624]
I1109 03:38:59.008596  106211 wrap.go:47] POST /api/v1/namespaces/preemption-race7260b83a-02a2-11ea-825f-0242ac110002/events: (3.386297ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:42628]
I1109 03:38:59.013191  106211 scheduling_queue.go:908] About to try and schedule pod preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I1109 03:38:59.013320  106211 scheduler.go:449] Skip schedule deleting pod: preemption-race7260b83a-02a2-11ea-825f-0242ac110002/ppod-22
I